diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -72,6 +72,51 @@
 } // loop over half = "b", "t"
+multiclass compare_with_pred<string condname, dag arguments,
+                             dag cmp, string suffix> {
+  // Make the predicated and unpredicated versions of a single comparison.
+  def: Intrinsic<Pred, arguments, (u16 (IRInt<"pred_v2i", [Pred]> cmp))>,
+       NameOverride<"vcmp" # condname # "q" # suffix>;
+  def: Intrinsic<Pred, !con(arguments, (args Pred:$inpred)),
+                 (u16 (IRInt<"pred_v2i", [Pred]> (and $inpred, cmp)))>,
+       NameOverride<"vcmp" # condname # "q_m" # suffix>;
+}
+
+multiclass compare<string condname, IRBuilder cmpop> {
+  // Make all four variants of a comparison: the vector/vector and
+  // vector/scalar forms, each using compare_with_pred to make a
+  // predicated and unpredicated version.
+  defm: compare_with_pred<condname, (args Vector:$va, Vector:$vb),
+                          (cmpop $va, $vb), "">;
+  let pnt = PNT_NType in {
+    defm: compare_with_pred<condname, (args Vector:$va, unpromoted<Scalar>:$sb),
+                            (cmpop $va, (dup $sb)), "_n">;
+  }
+}
+let params = T.Int in {
+  defm: compare<"eq", icmp_eq>;
+  defm: compare<"ne", icmp_ne>;
+}
+let params = T.Signed in {
+  defm: compare<"gt", icmp_sgt>;
+  defm: compare<"ge", icmp_sge>;
+  defm: compare<"lt", icmp_slt>;
+  defm: compare<"le", icmp_sle>;
+}
+let params = T.Unsigned in {
+  defm: compare<"hi", icmp_ugt>;
+  defm: compare<"cs", icmp_uge>;
+}
+let params = T.Float in {
+  defm: compare<"eq", fcmp_eq>;
+  defm: compare<"ne", fcmp_ne>;
+  defm: compare<"gt", fcmp_gt>;
+  defm: compare<"ge", fcmp_ge>;
+  defm: compare<"lt", fcmp_lt>;
+  defm: compare<"le", fcmp_le>;
+}
+
 multiclass contiguous_load<string mnemonic, PrimitiveType memtype,
                            list<Type> same_size, list<Type> wider> {
   // Intrinsics named with explicit memory and element sizes that match:
diff --git a/clang/include/clang/Basic/arm_mve_defs.td b/clang/include/clang/Basic/arm_mve_defs.td
--- a/clang/include/clang/Basic/arm_mve_defs.td
+++ b/clang/include/clang/Basic/arm_mve_defs.td
@@ -86,6 +86,23 @@
 }
 def zeroinit: IRFunction<"llvm::Constant::getNullValue">;
 def undef: IRFunction<"UndefValue::get">;
+def icmp_eq: IRBuilder<"CreateICmpEQ">;
+def icmp_ne: IRBuilder<"CreateICmpNE">;
+def icmp_ugt: IRBuilder<"CreateICmpUGT">;
+def icmp_uge: IRBuilder<"CreateICmpUGE">;
+def icmp_ult: IRBuilder<"CreateICmpULT">;
+def icmp_ule: IRBuilder<"CreateICmpULE">;
+def icmp_sgt: IRBuilder<"CreateICmpSGT">;
+def icmp_sge: IRBuilder<"CreateICmpSGE">;
+def icmp_slt: IRBuilder<"CreateICmpSLT">;
+def icmp_sle: IRBuilder<"CreateICmpSLE">;
+def fcmp_eq: IRBuilder<"CreateFCmpOEQ">;
+def fcmp_ne: IRBuilder<"CreateFCmpUNE">; // not O: it must return true on NaNs
+def fcmp_gt: IRBuilder<"CreateFCmpOGT">;
+def fcmp_ge: IRBuilder<"CreateFCmpOGE">;
+def fcmp_lt: IRBuilder<"CreateFCmpOLT">;
+def fcmp_le: IRBuilder<"CreateFCmpOLE">;
+def dup: CGHelperFn<"ARMMVEDupVector">;

 // A node that makes an Address out of a pointer-typed Value, by
 // providing an alignment as the second argument.
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -6795,6 +6795,33 @@
   return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
 }

+static llvm::Value *ARMMVEDupVector(CGBuilderTy &Builder, llvm::Value *V) {
+  // Helper function to duplicate a scalar value V into all lanes of an MVE
+  // vector. This is used for the vdup builtins, and also for many builtins
+  // taking a (vector, scalar) operand pair.
+
+  // MVE vectors are all 128 bits wide.
+  llvm::Type *VecTy = llvm::VectorType::get(
+      V->getType(), 128 / V->getType()->getPrimitiveSizeInBits());
+
+  // Make an undefined vector.
+  llvm::Value *VecUndef = UndefValue::get(VecTy);
+
+  // Make a vector which has V in lane 0.
+  llvm::Value *VecV =
+      Builder.CreateInsertElement(VecUndef, V, static_cast<uint64_t>(0));
+
+  // Make a vector of i32s to use as the shufflevector index list. It should
+  // have the same number of i32 as VecTy has lanes, and they should all be
+  // zero.
+  llvm::Value *VecIndex = llvm::Constant::getNullValue(llvm::VectorType::get(
+      Builder.getInt32Ty(), VecTy->getVectorNumElements()));
+
+  // And use shufflevector to replicate lane 0 of VecV into all lanes of the
+  // output (reusing VecUndef for the other vector input).
+  return Builder.CreateShuffleVector(VecV, VecUndef, VecIndex);
+}
+
 Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E,
                                               ReturnValueSlot ReturnValue,
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/compare.c b/clang/test/CodeGen/arm-mve-intrinsics/compare.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/compare.c
@@ -0,0 +1,3150 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vcmpeqq_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+// CHECK-NEXT:    ret i16 [[TMP2]]
+//
+mve_pred16_t test_vcmpeqq_f16(float16x8_t a, float16x8_t b)
+{
+#ifdef POLYMORPHIC
+    return vcmpeqq(a, b);
+#else /* POLYMORPHIC */
+    return vcmpeqq_f16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcmpeqq_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+// CHECK-NEXT:    ret i16 [[TMP2]]
+//
+mve_pred16_t test_vcmpeqq_f32(float32x4_t a, float32x4_t b)
+{
+#ifdef POLYMORPHIC
+    return vcmpeqq(a, b);
+#else /* POLYMORPHIC */
+    return vcmpeqq_f32(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcmpeqq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+// CHECK-NEXT:    ret i16 [[TMP2]]
+//
+mve_pred16_t test_vcmpeqq_s8(int8x16_t a, int8x16_t b)
+{
+#ifdef POLYMORPHIC
+    return vcmpeqq(a, b);
+#else /* POLYMORPHIC */
+    return vcmpeqq_s8(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcmpeqq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+// CHECK-NEXT:    ret i16 [[TMP2]]
+//
+mve_pred16_t test_vcmpeqq_s16(int16x8_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+    return vcmpeqq(a, b);
+#else /* POLYMORPHIC */
+    return vcmpeqq_s16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcmpeqq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:
[[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpeqq_s32(int32x4_t a, int32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpeqq_u8(uint8x16_t a, uint8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpeqq_u16(uint16x8_t a, uint16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpeqq_u32(uint32x4_t a, uint32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP4]]) +// CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +// CHECK-NEXT: ret i16 [[TMP6]] +// +mve_pred16_t test_vcmpeqq_n_f16(float16x8_t a, float16_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpeqq_n_f32(float32x4_t a, float32_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// 
CHECK-LABEL: @test_vcmpeqq_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpeqq_n_s8(int8x16_t a, int8_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpeqq_n_s16(int16x8_t a, int16_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpeqq_n_s32(int32x4_t a, int32_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpeqq_n_u8(uint8x16_t a, uint8_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpeqq_n_u16(uint16x8_t a, uint16_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpeqq_n_u32(uint32x4_t a, uint32_t b) +{ +#ifdef POLYMORPHIC + return vcmpeqq(a, b); +#else /* POLYMORPHIC */ + return vcmpeqq_n_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_s16(a, b, p); +#endif /* POLYMORPHIC */ +} 
+ +// CHECK-LABEL: @test_vcmpeqq_m_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpeqq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x half> [[TMP4]], <8 x half> undef, <8 x i32> 
zeroinitializer +// CHECK-NEXT: [[TMP6:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[TMP5]] +// CHECK-NEXT: [[TMP7:%.*]] = and <8 x i1> [[TMP3]], [[TMP6]] +// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP7]]) +// CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[TMP8]] to i16 +// CHECK-NEXT: ret i16 [[TMP9]] +// +mve_pred16_t test_vcmpeqq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpeqq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpeqq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpeqq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 
[[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpeqq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpeqq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpeqq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpeqq_m_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpeqq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpeqq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpeqq_m_n_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: 
@test_vcmpneq_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp une <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_f16(float16x8_t a, float16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp une <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_f32(float32x4_t a, float32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_s8(int8x16_t a, int8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_s16(int16x8_t a, int16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_s32(int32x4_t a, int32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_u8(uint8x16_t a, uint8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_u16(uint16x8_t a, uint16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_u32( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpneq_u32(uint32x4_t a, uint32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp une <8 x half> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP4]]) +// CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +// CHECK-NEXT: ret i16 [[TMP6]] +// +mve_pred16_t test_vcmpneq_n_f16(float16x8_t a, float16_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = fcmp une <4 x float> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpneq_n_f32(float32x4_t a, float32_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpneq_n_s8(int8x16_t a, int8_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpneq_n_s16(int16x8_t a, int16_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x 
i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpneq_n_s32(int32x4_t a, int32_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpneq_n_u8(uint8x16_t a, uint8_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpneq_n_u16(uint16x8_t a, uint16_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpneq_n_u32(uint32x4_t a, uint32_t b) +{ +#ifdef POLYMORPHIC + return vcmpneq(a, b); +#else /* POLYMORPHIC */ + return vcmpneq_n_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp une <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = 
call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp une <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// 
CHECK-LABEL: @test_vcmpneq_m_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpneq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x half> [[TMP4]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP6:%.*]] = fcmp une <8 x half> [[A:%.*]], [[TMP5]] +// CHECK-NEXT: [[TMP7:%.*]] = and <8 x i1> [[TMP3]], [[TMP6]] +// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP7]]) +// CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[TMP8]] to i16 +// CHECK-NEXT: ret i16 [[TMP9]] +// +mve_pred16_t test_vcmpneq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp une <4 x float> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpneq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_f32(a, b, p); +#endif /* 
POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpneq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpneq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpneq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t 
test_vcmpneq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpneq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpneq_m_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpneq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpneq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpneq_m_n_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgeq_f16(float16x8_t a, float16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgeq_f32(float32x4_t a, float32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgeq_s8(int8x16_t a, int8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_s8(a, b); +#endif /* 
POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgeq_s16(int16x8_t a, int16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgeq_s32(int32x4_t a, int32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpcsq_u8(uint8x16_t a, uint8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpcsq(a, b); +#else /* POLYMORPHIC */ + return vcmpcsq_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpcsq_u16(uint16x8_t a, uint16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpcsq(a, b); +#else /* POLYMORPHIC */ + return vcmpcsq_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpcsq_u32(uint32x4_t a, uint32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpcsq(a, b); +#else /* POLYMORPHIC */ + return vcmpcsq_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP4]]) +// CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +// CHECK-NEXT: ret i16 [[TMP6]] +// +mve_pred16_t test_vcmpgeq_n_f16(float16x8_t a, float16_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_n_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> 
undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgeq_n_f32(float32x4_t a, float32_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_n_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgeq_n_s8(int8x16_t a, int8_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_n_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgeq_n_s16(int16x8_t a, int16_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_n_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgeq_n_s32(int32x4_t a, int32_t b) +{ +#ifdef POLYMORPHIC + return vcmpgeq(a, b); +#else /* POLYMORPHIC */ + return vcmpgeq_n_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpcsq_n_u8(uint8x16_t a, uint8_t b) +{ +#ifdef POLYMORPHIC + return vcmpcsq(a, b); +#else /* POLYMORPHIC */ + return vcmpcsq_n_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x 
i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpcsq_n_u16(uint16x8_t a, uint16_t b) +{ +#ifdef POLYMORPHIC + return vcmpcsq(a, b); +#else /* POLYMORPHIC */ + return vcmpcsq_n_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpcsq_n_u32(uint32x4_t a, uint32_t b) +{ +#ifdef POLYMORPHIC + return vcmpcsq(a, b); +#else /* POLYMORPHIC */ + return vcmpcsq_n_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgeq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgeq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgeq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: 
[[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgeq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgeq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_m_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpcsq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpcsq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpcsq_m_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_m_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpcsq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpcsq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpcsq_m_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_m_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpcsq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpcsq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpcsq_m_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + 
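+// Note: the comparison tests in this file all check the same lowering
+// pattern, visible in the CHECK lines above and below. Unpredicated forms
+// lower to a plain icmp/fcmp whose <N x i1> result is converted to an
+// mve_pred16_t by @llvm.arm.mve.pred.v2i.* followed by a trunc to i16.
+// The predicated (_m) forms first widen the i16 predicate with zext and
+// expand it to an <N x i1> vector via @llvm.arm.mve.pred.i2v.*, then 'and'
+// it with the comparison result before converting back. The _n
+// (vector/scalar) forms additionally splat the scalar operand with an
+// insertelement into lane 0 and a zero-index shufflevector before the
+// compare.
+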
+// CHECK-LABEL: @test_vcmpgeq_m_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x half> [[TMP4]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP6:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[TMP5]] +// CHECK-NEXT: [[TMP7:%.*]] = and <8 x i1> [[TMP3]], [[TMP6]] +// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP7]]) +// CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[TMP8]] to i16 +// CHECK-NEXT: ret i16 [[TMP9]] +// +mve_pred16_t test_vcmpgeq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_n_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgeq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_n_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgeq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_n_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], 
[[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgeq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_n_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgeq_m_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgeq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgeq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgeq_m_n_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_m_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpcsq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpcsq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpcsq_m_n_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_m_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpcsq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpcsq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpcsq_m_n_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpcsq_m_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> 
[[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpcsq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpcsq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpcsq_m_n_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgtq_f16(float16x8_t a, float16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgtq_f32(float32x4_t a, float32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgtq_s8(int8x16_t a, int8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgtq_s16(int16x8_t a, int16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpgtq_s32(int32x4_t a, int32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmphiq_u8(uint8x16_t a, uint8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmphiq(a, b); +#else /* POLYMORPHIC */ + return 
vcmphiq_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmphiq_u16(uint16x8_t a, uint16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmphiq(a, b); +#else /* POLYMORPHIC */ + return vcmphiq_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmphiq_u32(uint32x4_t a, uint32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmphiq(a, b); +#else /* POLYMORPHIC */ + return vcmphiq_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP4]]) +// CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +// CHECK-NEXT: ret i16 [[TMP6]] +// +mve_pred16_t test_vcmpgtq_n_f16(float16x8_t a, float16_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_n_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgtq_n_f32(float32x4_t a, float32_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_n_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgtq_n_s8(int8x16_t a, int8_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_n_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector 
<8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgtq_n_s16(int16x8_t a, int16_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_n_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpgtq_n_s32(int32x4_t a, int32_t b) +{ +#ifdef POLYMORPHIC + return vcmpgtq(a, b); +#else /* POLYMORPHIC */ + return vcmpgtq_n_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmphiq_n_u8(uint8x16_t a, uint8_t b) +{ +#ifdef POLYMORPHIC + return vcmphiq(a, b); +#else /* POLYMORPHIC */ + return vcmphiq_n_u8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmphiq_n_u16(uint16x8_t a, uint16_t b) +{ +#ifdef POLYMORPHIC + return vcmphiq(a, b); +#else /* POLYMORPHIC */ + return vcmphiq_n_u16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmphiq_n_u32(uint32x4_t a, uint32_t b) +{ +#ifdef POLYMORPHIC + return vcmphiq(a, b); +#else /* POLYMORPHIC */ + return vcmphiq_n_u32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <8 x half> [[A:%.*]], 
[[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgtq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgtq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgtq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgtq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpgtq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_m_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// 
CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmphiq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmphiq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmphiq_m_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_m_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmphiq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmphiq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmphiq_m_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_m_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmphiq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmphiq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmphiq_m_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x half> [[TMP4]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP6:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[TMP5]] +// CHECK-NEXT: [[TMP7:%.*]] = and <8 x i1> [[TMP3]], [[TMP6]] +// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP7]]) +// CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[TMP8]] to i16 +// CHECK-NEXT: ret i16 [[TMP9]] +// +mve_pred16_t test_vcmpgtq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_n_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// 
CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgtq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_n_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgtq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_n_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgtq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_n_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpgtq_m_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpgtq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpgtq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpgtq_m_n_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_m_n_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to 
i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmphiq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmphiq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmphiq_m_n_u8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_m_n_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmphiq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmphiq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmphiq_m_n_u16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmphiq_m_n_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmphiq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmphiq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmphiq_m_n_u32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpleq_f16(float16x8_t a, float16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpleq_f32(float32x4_t a, 
float32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpleq_s8(int8x16_t a, int8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpleq_s16(int16x8_t a, int16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpleq_s32(int32x4_t a, int32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP4]]) +// CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +// CHECK-NEXT: ret i16 [[TMP6]] +// +mve_pred16_t test_vcmpleq_n_f16(float16x8_t a, float16_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_n_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpleq_n_f32(float32x4_t a, float32_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_n_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp 
sle <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpleq_n_s8(int8x16_t a, int8_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_n_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpleq_n_s16(int16x8_t a, int16_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_n_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpleq_n_s32(int32x4_t a, int32_t b) +{ +#ifdef POLYMORPHIC + return vcmpleq(a, b); +#else /* POLYMORPHIC */ + return vcmpleq_n_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpleq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpleq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[B:%.*]] +// 
CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpleq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpleq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpleq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x half> [[TMP4]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP6:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[TMP5]] +// CHECK-NEXT: [[TMP7:%.*]] = and <8 x i1> [[TMP3]], [[TMP6]] +// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP7]]) +// CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[TMP8]] to i16 +// CHECK-NEXT: ret i16 [[TMP9]] +// +mve_pred16_t test_vcmpleq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_n_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp ole <4 x float> [[A:%.*]], 
[[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpleq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_n_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpleq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_n_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpleq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_n_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpleq_m_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpleq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpleq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpleq_m_n_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// 
CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpltq_f16(float16x8_t a, float16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpltq_f32(float32x4_t a, float32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpltq_s8(int8x16_t a, int8x16_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpltq_s16(int16x8_t a, int16x8_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +// CHECK-NEXT: ret i16 [[TMP2]] +// +mve_pred16_t test_vcmpltq_s32(int32x4_t a, int32x4_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP4]]) +// CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16 +// CHECK-NEXT: ret i16 [[TMP6]] +// +mve_pred16_t test_vcmpltq_n_f16(float16x8_t a, float16_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_n_f16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> 
[[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpltq_n_f32(float32x4_t a, float32_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_n_f32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[TMP0]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpltq_n_s8(int8x16_t a, int8_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_n_s8(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpltq_n_s16(int16x8_t a, int16_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_n_s16(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[TMP1]] +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16 +// CHECK-NEXT: ret i16 [[TMP4]] +// +mve_pred16_t test_vcmpltq_n_s32(int32x4_t a, int32_t b) +{ +#ifdef POLYMORPHIC + return vcmpltq(a, b); +#else /* POLYMORPHIC */ + return vcmpltq_n_s32(a, b); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpltq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// 
CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpltq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpltq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpltq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[B:%.*]] +// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]] +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16 +// CHECK-NEXT: ret i16 [[TMP5]] +// +mve_pred16_t test_vcmpltq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_n_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B_COERCE:%.*]] to i32 +// CHECK-NEXT: [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half +// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) +// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x half> [[TMP4]], <8 x half> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP6:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[TMP5]] +// CHECK-NEXT: [[TMP7:%.*]] = and <8 x i1> [[TMP3]], [[TMP6]] +// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP7]]) +// CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[TMP8]] to i16 +// CHECK-NEXT: ret i16 [[TMP9]] +// +mve_pred16_t test_vcmpltq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC 
+ return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_n_f16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_n_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpltq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_n_f32(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_n_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> undef, i8 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> undef, <16 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <16 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpltq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_n_s8(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_n_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> undef, i16 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <8 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP5]]) +// CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpltq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_n_s16(a, b, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcmpltq_m_n_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> undef, i32 [[B:%.*]], i64 0 +// CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK-NEXT: [[TMP4:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[TMP3]] +// CHECK-NEXT: [[TMP5:%.*]] = and <4 x i1> [[TMP1]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP5]]) +// 
CHECK-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i16 +// CHECK-NEXT: ret i16 [[TMP7]] +// +mve_pred16_t test_vcmpltq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcmpltq_m(a, b, p); +#else /* POLYMORPHIC */ + return vcmpltq_m_n_s32(a, b, p); +#endif /* POLYMORPHIC */ +} + diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -3308,6 +3308,39 @@ } break; } + case Intrinsic::arm_mve_pred_i2v: { + Value *Arg = II->getArgOperand(0); + Value *ArgArg; + // Fold i2v(v2i(x)) -> x if the predicate vector types match. + if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg))) && + II->getType() == ArgArg->getType()) + return replaceInstUsesWith(*II, ArgArg); + // Only the low 16 bits of the scalar input are used. + KnownBits ScalarKnown(32); + if (SimplifyDemandedBits(II, 0, APInt::getLowBitsSet(32, 16), + ScalarKnown, 0)) + return II; + break; + } + case Intrinsic::arm_mve_pred_v2i: { + Value *Arg = II->getArgOperand(0); + Value *ArgArg; + // Fold v2i(i2v(x)) -> x. + if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(m_Value(ArgArg)))) + return replaceInstUsesWith(*II, ArgArg); + // The result is a 16-bit lane mask and !range is half-open, so the + // upper bound is 0x10000, allowing the all-lanes-true value 0xFFFF. + if (!II->getMetadata(LLVMContext::MD_range)) { + Type *IntTy32 = Type::getInt32Ty(II->getContext()); + Metadata *M[] = { + ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)), + ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000)) + }; + II->setMetadata(LLVMContext::MD_range, MDNode::get(II->getContext(), M)); + return II; + } + break; + } case Intrinsic::arm_mve_vadc: case Intrinsic::arm_mve_vadc_predicated: { unsigned CarryOp = diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll b/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll @@ -0,0 +1,22 @@ +; RUN: opt -instcombine %s | llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs -o - | FileCheck %s + +define arm_aapcs_vfpcc <8 x i16> @test_vpt_block(<8 x i16> %v_inactive, <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) { +; CHECK-LABEL: test_vpt_block: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vpt.i16 eq, q1, q2 +; CHECK-NEXT: vaddt.i16 q0, q3, q2 +; CHECK-NEXT: bx lr +entry: + %0 = icmp eq <8 x i16> %v1, %v2 + %1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0) + %2 = trunc i32 %1 to i16 + %3 = zext i16 %2 to i32 + %4 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %3) + %5 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %v3, <8 x i16> %v2, <8 x i1> %4, <8 x i16> %v_inactive) + ret <8 x i16> %5 +} + +declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>) +declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) +declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) + diff --git a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll @@ -0,0 +1,236 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -instcombine -S -o - %s | FileCheck %s + +declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>) +declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>) +declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>) + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) +declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) +declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) + +; Round-trip conversions from predicate vector to i32 back to the same +; size of vector should be eliminated.
+ +define <4 x i1> @v2i2v_4(<4 x i1> %vin) { +; CHECK-LABEL: @v2i2v_4( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret <4 x i1> [[VIN:%.*]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %int) + ret <4 x i1> %vout +} + +define <8 x i1> @v2i2v_8(<8 x i1> %vin) { +; CHECK-LABEL: @v2i2v_8( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret <8 x i1> [[VIN:%.*]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin) + %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %int) + ret <8 x i1> %vout +} + +define <16 x i1> @v2i2v_16(<16 x i1> %vin) { +; CHECK-LABEL: @v2i2v_16( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret <16 x i1> [[VIN:%.*]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin) + %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %int) + ret <16 x i1> %vout +} + +; Conversions from a predicate vector to i32 and then to a _different_ +; size of predicate vector should be left alone. + +define <16 x i1> @v2i2v_4_16(<4 x i1> %vin) { +; CHECK-LABEL: @v2i2v_4_16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0 +; CHECK-NEXT: [[VOUT:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[INT]]) +; CHECK-NEXT: ret <16 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %int) + ret <16 x i1> %vout +} + +define <4 x i1> @v2i2v_8_4(<8 x i1> %vin) { +; CHECK-LABEL: @v2i2v_8_4( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[VIN:%.*]]), !range !0 +; CHECK-NEXT: [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[INT]]) +; CHECK-NEXT: ret <4 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin) + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %int) + ret <4 x i1> %vout +} + +define <8 x i1> @v2i2v_16_8(<16 x i1> %vin) { +; CHECK-LABEL: @v2i2v_16_8( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[VIN:%.*]]), !range !0 +; CHECK-NEXT: [[VOUT:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[INT]]) +; CHECK-NEXT: ret <8 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin) + %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %int) + ret <8 x i1> %vout +} + +; Round-trip conversions from i32 to predicate vector back to i32 +; should be eliminated. + +define i32 @i2v2i_4(i32 %iin) { +; CHECK-LABEL: @i2v2i_4( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i32 [[IIN:%.*]] +; +entry: + %vec = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %iin) + %iout = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vec) + ret i32 %iout +} + +define i32 @i2v2i_8(i32 %iin) { +; CHECK-LABEL: @i2v2i_8( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i32 [[IIN:%.*]] +; +entry: + %vec = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %iin) + %iout = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vec) + ret i32 %iout +} + +define i32 @i2v2i_16(i32 %iin) { +; CHECK-LABEL: @i2v2i_16( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i32 [[IIN:%.*]] +; +entry: + %vec = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %iin) + %iout = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vec) + ret i32 %iout +} + +; v2i leaves the top 16 bits clear. So a trunc/zext pair applied to +; its output, going via i16, can be completely eliminated - but not +; one going via i8. 
Similarly with other methods of clearing the top +; bits, like bitwise and. + +define i32 @v2i_truncext_i16(<4 x i1> %vin) { +; CHECK-LABEL: @v2i_truncext_i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0 +; CHECK-NEXT: ret i32 [[WIDE1]] +; +entry: + %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %narrow = trunc i32 %wide1 to i16 + %wide2 = zext i16 %narrow to i32 + ret i32 %wide2 +} + +define i32 @v2i_truncext_i8(<4 x i1> %vin) { +; CHECK-LABEL: @v2i_truncext_i8( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0 +; CHECK-NEXT: [[WIDE2:%.*]] = and i32 [[WIDE1]], 255 +; CHECK-NEXT: ret i32 [[WIDE2]] +; +entry: + %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %narrow = trunc i32 %wide1 to i8 + %wide2 = zext i8 %narrow to i32 + ret i32 %wide2 +} + +define i32 @v2i_and_16(<4 x i1> %vin) { +; CHECK-LABEL: @v2i_and_16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0 +; CHECK-NEXT: ret i32 [[WIDE1]] +; +entry: + %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %wide2 = and i32 %wide1, 65535 + ret i32 %wide2 +} + +define i32 @v2i_and_15(<4 x i1> %vin) { +; CHECK-LABEL: @v2i_and_15( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0 +; CHECK-NEXT: [[WIDE2:%.*]] = and i32 [[WIDE1]], 32767 +; CHECK-NEXT: ret i32 [[WIDE2]] +; +entry: + %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %wide2 = and i32 %wide1, 32767 + ret i32 %wide2 +} + +; i2v doesn't use the top bits of its input. So the same operations +; on a value that's about to be passed to i2v can be eliminated. + +define <4 x i1> @i2v_truncext_i16(i32 %wide1) { +; CHECK-LABEL: @i2v_truncext_i16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE1:%.*]]) +; CHECK-NEXT: ret <4 x i1> [[VOUT]] +; +entry: + %narrow = trunc i32 %wide1 to i16 + %wide2 = zext i16 %narrow to i32 + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2) + ret <4 x i1> %vout +} + +define <4 x i1> @i2v_truncext_i8(i32 %wide1) { +; CHECK-LABEL: @i2v_truncext_i8( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE2:%.*]] = and i32 [[WIDE1:%.*]], 255 +; CHECK-NEXT: [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE2]]) +; CHECK-NEXT: ret <4 x i1> [[VOUT]] +; +entry: + %narrow = trunc i32 %wide1 to i8 + %wide2 = zext i8 %narrow to i32 + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2) + ret <4 x i1> %vout +} + +define <4 x i1> @i2v_and_16(i32 %wide1) { +; CHECK-LABEL: @i2v_and_16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE1:%.*]]) +; CHECK-NEXT: ret <4 x i1> [[VOUT]] +; +entry: + %wide2 = and i32 %wide1, 65535 + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2) + ret <4 x i1> %vout +} + +define <4 x i1> @i2v_and_15(i32 %wide1) { +; CHECK-LABEL: @i2v_and_15( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE2:%.*]] = and i32 [[WIDE1:%.*]], 32767 +; CHECK-NEXT: [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE2]]) +; CHECK-NEXT: ret <4 x i1> [[VOUT]] +; +entry: + %wide2 = and i32 %wide1, 32767 + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2) + ret <4 x i1> %vout +}
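As a worked illustration of what the InstCombine folds above achieve at the source level, here is a minimal C sketch. It is not part of the patch's test suite; it assumes the pre-existing vaddq_m_s16 predicated-add intrinsic from arm_mve.h alongside the vcmpeqq_s16 comparison intrinsic added by this patch, and the function name is hypothetical. The comparison result is returned as an mve_pred16_t, which codegen lowers to llvm.arm.mve.pred.v2i plus a trunc/zext, and the predicated add consumes it via llvm.arm.mve.pred.i2v; the folds should remove the whole round trip, leaving just the icmp feeding llvm.arm.mve.add.predicated, which is exactly the IR that mve-vpt-from-intrinsics.ll above compiles into a single VPT block.

#include <arm_mve.h>

int16x8_t cmp_then_add(int16x8_t inactive, int16x8_t a, int16x8_t b,
                       int16x8_t addend)
{
    /* Compare the lanes of a and b for equality; the 16-bit predicate
       round-trips through an i32 because mve_pred16_t is an integer type. */
    mve_pred16_t p = vcmpeqq_s16(a, b);
    /* Add addend and b only in the lanes where p is set, taking the
       remaining lanes from inactive. */
    return vaddq_m_s16(inactive, addend, b, p);
}

Once instcombine runs (for example at -O1 and above), no calls to the pred.v2i or pred.i2v intrinsics should remain, so the backend is free to fold the comparison and the predicated add into one VPT block.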