Index: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
@@ -283,6 +283,7 @@
     if (HasMVEFP) {
       setOperationAction(ISD::FMINNUM, VT, Legal);
       setOperationAction(ISD::FMAXNUM, VT, Legal);
+      setOperationAction(ISD::FROUND, VT, Legal);
 
       // No native support for these.
       setOperationAction(ISD::FDIV, VT, Expand);
@@ -296,6 +297,7 @@
       setOperationAction(ISD::FLOG10, VT, Expand);
       setOperationAction(ISD::FEXP, VT, Expand);
       setOperationAction(ISD::FEXP2, VT, Expand);
+      setOperationAction(ISD::FNEARBYINT, VT, Expand);
     }
   }
 
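The two hunks above split the new rounding support by what MVE can express natively: ISD::FROUND becomes Legal because VRINTA implements llvm.round's round-to-nearest, ties-away-from-zero semantics on a whole q register, while ISD::FNEARBYINT is marked Expand because the MVE VRINT family has no variant that rounds in the current FPSCR mode without raising Inexact, so the legalizer falls back to scalar code. A minimal sketch of the difference (hypothetical function name; assumes an llc built with this patch and -mattr=+mve.fp):

define arm_aapcs_vfpcc <4 x float> @round_vs_nearbyint(<4 x float> %x) {
entry:
  ; FROUND is Legal: this should select to a single vrinta.f32 q0, q0.
  %r = call fast <4 x float> @llvm.round.v4f32(<4 x float> %x)
  ; FNEARBYINT is Expand: this is scalarized into four vrintr.f32 ops.
  %n = call fast <4 x float> @llvm.nearbyint.v4f32(<4 x float> %r)
  ret <4 x float> %n
}

declare <4 x float> @llvm.round.v4f32(<4 x float>)
declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)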
Index: llvm/trunk/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/trunk/lib/Target/ARM/ARMInstrMVE.td
@@ -2325,6 +2325,29 @@
 defm MVE_VRINTf16 : MVE_VRINT_ops<"f16", 0b01>;
 defm MVE_VRINTf32 : MVE_VRINT_ops<"f32", 0b10>;
 
+let Predicates = [HasMVEFloat] in {
+  def : Pat<(v4f32 (frint (v4f32 MQPR:$val1))),
+            (v4f32 (MVE_VRINTf32X (v4f32 MQPR:$val1)))>;
+  def : Pat<(v8f16 (frint (v8f16 MQPR:$val1))),
+            (v8f16 (MVE_VRINTf16X (v8f16 MQPR:$val1)))>;
+  def : Pat<(v4f32 (fround (v4f32 MQPR:$val1))),
+            (v4f32 (MVE_VRINTf32A (v4f32 MQPR:$val1)))>;
+  def : Pat<(v8f16 (fround (v8f16 MQPR:$val1))),
+            (v8f16 (MVE_VRINTf16A (v8f16 MQPR:$val1)))>;
+  def : Pat<(v4f32 (ftrunc (v4f32 MQPR:$val1))),
+            (v4f32 (MVE_VRINTf32Z (v4f32 MQPR:$val1)))>;
+  def : Pat<(v8f16 (ftrunc (v8f16 MQPR:$val1))),
+            (v8f16 (MVE_VRINTf16Z (v8f16 MQPR:$val1)))>;
+  def : Pat<(v4f32 (ffloor (v4f32 MQPR:$val1))),
+            (v4f32 (MVE_VRINTf32M (v4f32 MQPR:$val1)))>;
+  def : Pat<(v8f16 (ffloor (v8f16 MQPR:$val1))),
+            (v8f16 (MVE_VRINTf16M (v8f16 MQPR:$val1)))>;
+  def : Pat<(v4f32 (fceil (v4f32 MQPR:$val1))),
+            (v4f32 (MVE_VRINTf32P (v4f32 MQPR:$val1)))>;
+  def : Pat<(v8f16 (fceil (v8f16 MQPR:$val1))),
+            (v8f16 (MVE_VRINTf16P (v8f16 MQPR:$val1)))>;
+}
+
 class MVEFloatArithNeon<string iname, string suffix, bit size,
                         dag oops, dag iops, string ops,
                         vpred_ops vpred, string cstr, list<dag> pattern=[]>
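Each pair of Pats binds one generic rounding node to the VRINT encoding whose rounding-mode suffix matches it: frint to VRINTX (current mode, signalling Inexact), fround to VRINTA (ties away from zero), ftrunc to VRINTZ (toward zero), ffloor to VRINTM (toward minus infinity), and fceil to VRINTP (toward plus infinity). The MVE_VRINTf16*/MVE_VRINTf32* instruction names come from the MVE_VRINT_ops multiclass instantiated just above the hunk. As a sketch, one such mapping in IR terms (hypothetical function name; assumes -mattr=+mve.fp):

define arm_aapcs_vfpcc <8 x half> @floor_v8f16(<8 x half> %x) {
entry:
  ; Matched by the ffloor Pat above: selects to vrintm.f16 q0, q0.
  %0 = call fast <8 x half> @llvm.floor.v8f16(<8 x half> %x)
  ret <8 x half> %0
}

declare <8 x half> @llvm.floor.v8f16(<8 x half>)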
Index: llvm/trunk/test/CodeGen/Thumb2/mve-fmath.ll
===================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-fmath.ll
+++ llvm/trunk/test/CodeGen/Thumb2/mve-fmath.ll
@@ -1165,6 +1165,181 @@
   ret <8 x half> %0
 }
 
+define arm_aapcs_vfpcc <4 x float> @copysign_float32_t(<4 x float> %src1, <4 x float> %src2) {
+; CHECK-LABEL: copysign_float32_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vstr s5, [sp, #8]
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    vstr s6, [sp, #16]
+; CHECK-NEXT:    ldr.w lr, [sp, #16]
+; CHECK-NEXT:    vstr s7, [sp, #24]
+; CHECK-NEXT:    lsr.w r2, r12, #31
+; CHECK-NEXT:    ldr r6, [sp, #24]
+; CHECK-NEXT:    vstr s3, [sp, #28]
+; CHECK-NEXT:    ldr r3, [sp, #28]
+; CHECK-NEXT:    vstr s4, [sp]
+; CHECK-NEXT:    ldr r0, [sp]
+; CHECK-NEXT:    vstr s0, [sp, #4]
+; CHECK-NEXT:    ldr r1, [sp, #4]
+; CHECK-NEXT:    vstr s1, [sp, #12]
+; CHECK-NEXT:    lsrs r0, r0, #31
+; CHECK-NEXT:    vstr s2, [sp, #20]
+; CHECK-NEXT:    bfi r1, r0, #31, #1
+; CHECK-NEXT:    ldr r4, [sp, #12]
+; CHECK-NEXT:    ldr r5, [sp, #20]
+; CHECK-NEXT:    bfi r4, r2, #31, #1
+; CHECK-NEXT:    lsr.w r2, lr, #31
+; CHECK-NEXT:    bfi r5, r2, #31, #1
+; CHECK-NEXT:    lsrs r2, r6, #31
+; CHECK-NEXT:    bfi r3, r2, #31, #1
+; CHECK-NEXT:    vmov s3, r3
+; CHECK-NEXT:    vmov s2, r5
+; CHECK-NEXT:    vmov s1, r4
+; CHECK-NEXT:    vmov s0, r1
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %0 = call fast <4 x float> @llvm.copysign.v4f32(<4 x float> %src1, <4 x float> %src2)
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @copysign_float16_t(<8 x half> %src1, <8 x half> %src2) {
+; CHECK-LABEL: copysign_float16_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    vmov.u16 r0, q1[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-NEXT:    vstr.16 s8, [sp, #24]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vstr.16 s8, [sp, #28]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vstr.16 s8, [sp, #20]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[4]
+; CHECK-NEXT:    vstr.16 s8, [sp, #16]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vstr.16 s8, [sp, #12]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vstr.16 s8, [sp, #8]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov s4, r0
+; CHECK-NEXT:    vstr.16 s8, [sp, #4]
+; CHECK-NEXT:    vstr.16 s4, [sp]
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov s4, r0
+; CHECK-NEXT:    ldrb.w r0, [sp, #25]
+; CHECK-NEXT:    vabs.f16 s4, s4
+; CHECK-NEXT:    ands r0, r0, #128
+; CHECK-NEXT:    vneg.f16 s6, s4
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s4, s4, s6
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov s4, r1
+; CHECK-NEXT:    ldrb.w r1, [sp, #29]
+; CHECK-NEXT:    vabs.f16 s4, s4
+; CHECK-NEXT:    ands r1, r1, #128
+; CHECK-NEXT:    vneg.f16 s6, s4
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    vseleq.f16 s4, s4, s6
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    ldrb.w r0, [sp, #21]
+; CHECK-NEXT:    vabs.f16 s8, s8
+; CHECK-NEXT:    ands r0, r0, #128
+; CHECK-NEXT:    vneg.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s8, s8, s10
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    ldrb.w r0, [sp, #17]
+; CHECK-NEXT:    vabs.f16 s8, s8
+; CHECK-NEXT:    ands r0, r0, #128
+; CHECK-NEXT:    vneg.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s8, s8, s10
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    ldrb.w r0, [sp, #13]
+; CHECK-NEXT:    vabs.f16 s8, s8
+; CHECK-NEXT:    ands r0, r0, #128
+; CHECK-NEXT:    vneg.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s8, s8, s10
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    ldrb.w r0, [sp, #9]
+; CHECK-NEXT:    vabs.f16 s8, s8
+; CHECK-NEXT:    ands r0, r0, #128
+; CHECK-NEXT:    vneg.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s8, s8, s10
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    ldrb.w r0, [sp, #5]
+; CHECK-NEXT:    vabs.f16 s8, s8
+; CHECK-NEXT:    ands r0, r0, #128
+; CHECK-NEXT:    vneg.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s8, s8, s10
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov s0, r0
+; CHECK-NEXT:    ldrb.w r0, [sp, #1]
+; CHECK-NEXT:    vabs.f16 s0, s0
+; CHECK-NEXT:    ands r0, r0, #128
+; CHECK-NEXT:    vneg.f16 s2, s0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call fast <8 x half> @llvm.copysign.v8f16(<8 x half> %src1, <8 x half> %src2)
+  ret <8 x half> %0
+}
+
 declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
 declare <4 x float> @llvm.cos.v4f32(<4 x float>)
 declare <4 x float> @llvm.sin.v4f32(<4 x float>)
@@ -1174,6 +1349,7 @@
 declare <4 x float> @llvm.log2.v4f32(<4 x float>)
 declare <4 x float> @llvm.log10.v4f32(<4 x float>)
 declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)
 declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
 declare <8 x half> @llvm.cos.v8f16(<8 x half>)
 declare <8 x half> @llvm.sin.v8f16(<8 x half>)
@@ -1183,4 +1359,5 @@
 declare <8 x half> @llvm.log2.v8f16(<8 x half>)
 declare <8 x half> @llvm.log10.v8f16(<8 x half>)
 declare <8 x half> @llvm.pow.v8f16(<8 x half>, <8 x half>)
+declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>)
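Note that no MVE pattern is added for vector FCOPYSIGN, so the two tests above pin down the current default expansion: the v4f32 case round-trips each lane through the stack and splices the sign bit back in with lsr/bfi, and the v8f16 case is scalarized into abs/neg plus a sign-driven vseleq per lane. For reference, the bitwise identity a dedicated lowering would implement is sketched below in plain IR (illustrative only; this is not what the backend emits here):

define <4 x float> @copysign_by_hand(<4 x float> %a, <4 x float> %b) {
entry:
  ; copysign(a, b) = (a & ~signbit) | (b & signbit), lane-wise.
  %ai = bitcast <4 x float> %a to <4 x i32>
  %bi = bitcast <4 x float> %b to <4 x i32>
  %mag = and <4 x i32> %ai, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %sgn = and <4 x i32> %bi, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  %ri = or <4 x i32> %mag, %sgn
  %r = bitcast <4 x i32> %ri to <4 x float>
  ret <4 x float> %r
}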
Index: llvm/trunk/test/CodeGen/Thumb2/mve-frint.ll
===================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-frint.ll
+++ llvm/trunk/test/CodeGen/Thumb2/mve-frint.ll
@@ -0,0 +1,450 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVE
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVEFP
+
+define arm_aapcs_vfpcc <4 x float> @fceil_float32_t(<4 x float> %src) {
+; CHECK-MVE-LABEL: fceil_float32_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vrintp.f32 s7, s3
+; CHECK-MVE-NEXT:    vrintp.f32 s6, s2
+; CHECK-MVE-NEXT:    vrintp.f32 s5, s1
+; CHECK-MVE-NEXT:    vrintp.f32 s4, s0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: fceil_float32_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintp.f32 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <4 x float> @llvm.ceil.v4f32(<4 x float> %src)
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @fceil_float16_t(<8 x half> %src) {
+; CHECK-MVE-LABEL: fceil_float16_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-MVE-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-MVE-NEXT:    vmov s4, r0
+; CHECK-MVE-NEXT:    vrintp.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r0, s4
+; CHECK-MVE-NEXT:    vmov s4, r1
+; CHECK-MVE-NEXT:    vrintp.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r1, s4
+; CHECK-MVE-NEXT:    vmov.16 q1[0], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-MVE-NEXT:    vmov.16 q1[1], r1
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintp.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[2], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintp.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[3], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintp.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[4], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintp.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[5], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintp.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[6], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-MVE-NEXT:    vmov s0, r0
+; CHECK-MVE-NEXT:    vrintp.f16 s0, s0
+; CHECK-MVE-NEXT:    vmov r0, s0
+; CHECK-MVE-NEXT:    vmov.16 q1[7], r0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: fceil_float16_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintp.f16 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <8 x half> @llvm.ceil.v8f16(<8 x half> %src)
+  ret <8 x half> %0
+}
+
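The two RUN lines cover both halves of the feature matrix, as the fceil tests above already show: -mattr=+mve,+fullfp16 (CHECK-MVE) provides integer MVE plus scalar floating point only, so each vector rounding op is scalarized through s registers, while -mattr=+mve.fp (CHECK-MVEFP) satisfies the HasMVEFloat predicate on the new patterns and keeps each op as a single q-register VRINT. The remaining tests follow the same shape for the other rounding modes.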
+define arm_aapcs_vfpcc <4 x float> @ftrunc_float32_t(<4 x float> %src) {
+; CHECK-MVE-LABEL: ftrunc_float32_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vrintz.f32 s7, s3
+; CHECK-MVE-NEXT:    vrintz.f32 s6, s2
+; CHECK-MVE-NEXT:    vrintz.f32 s5, s1
+; CHECK-MVE-NEXT:    vrintz.f32 s4, s0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: ftrunc_float32_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintz.f32 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <4 x float> @llvm.trunc.v4f32(<4 x float> %src)
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @ftrunc_float16_t(<8 x half> %src) {
+; CHECK-MVE-LABEL: ftrunc_float16_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-MVE-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-MVE-NEXT:    vmov s4, r0
+; CHECK-MVE-NEXT:    vrintz.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r0, s4
+; CHECK-MVE-NEXT:    vmov s4, r1
+; CHECK-MVE-NEXT:    vrintz.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r1, s4
+; CHECK-MVE-NEXT:    vmov.16 q1[0], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-MVE-NEXT:    vmov.16 q1[1], r1
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintz.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[2], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintz.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[3], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintz.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[4], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintz.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[5], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintz.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[6], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-MVE-NEXT:    vmov s0, r0
+; CHECK-MVE-NEXT:    vrintz.f16 s0, s0
+; CHECK-MVE-NEXT:    vmov r0, s0
+; CHECK-MVE-NEXT:    vmov.16 q1[7], r0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: ftrunc_float16_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintz.f16 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <8 x half> @llvm.trunc.v8f16(<8 x half> %src)
+  ret <8 x half> %0
+}
+
+define arm_aapcs_vfpcc <4 x float> @frint_float32_t(<4 x float> %src) {
+; CHECK-MVE-LABEL: frint_float32_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vrintx.f32 s7, s3
+; CHECK-MVE-NEXT:    vrintx.f32 s6, s2
+; CHECK-MVE-NEXT:    vrintx.f32 s5, s1
+; CHECK-MVE-NEXT:    vrintx.f32 s4, s0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: frint_float32_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintx.f32 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <4 x float> @llvm.rint.v4f32(<4 x float> %src)
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @frint_float16_t(<8 x half> %src) {
+; CHECK-MVE-LABEL: frint_float16_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-MVE-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-MVE-NEXT:    vmov s4, r0
+; CHECK-MVE-NEXT:    vrintx.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r0, s4
+; CHECK-MVE-NEXT:    vmov s4, r1
+; CHECK-MVE-NEXT:    vrintx.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r1, s4
+; CHECK-MVE-NEXT:    vmov.16 q1[0], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-MVE-NEXT:    vmov.16 q1[1], r1
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintx.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[2], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintx.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[3], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintx.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[4], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintx.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[5], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintx.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[6], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-MVE-NEXT:    vmov s0, r0
+; CHECK-MVE-NEXT:    vrintx.f16 s0, s0
+; CHECK-MVE-NEXT:    vmov r0, s0
+; CHECK-MVE-NEXT:    vmov.16 q1[7], r0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: frint_float16_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintx.f16 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <8 x half> @llvm.rint.v8f16(<8 x half> %src)
+  ret <8 x half> %0
+}
+
+define arm_aapcs_vfpcc <4 x float> @fnearbyint_float32_t(<4 x float> %src) {
+; CHECK-LABEL: fnearbyint_float32_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vrintr.f32 s7, s3
+; CHECK-NEXT:    vrintr.f32 s6, s2
+; CHECK-NEXT:    vrintr.f32 s5, s1
+; CHECK-NEXT:    vrintr.f32 s4, s0
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call fast <4 x float> @llvm.nearbyint.v4f32(<4 x float> %src)
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @fnearbyint_float16_t(<8 x half> %src) {
+; CHECK-LABEL: fnearbyint_float16_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov s4, r0
+; CHECK-NEXT:    vrintr.f16 s4, s4
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov s4, r1
+; CHECK-NEXT:    vrintr.f16 s4, s4
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vrintr.f16 s8, s8
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vrintr.f16 s8, s8
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vrintr.f16 s8, s8
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vrintr.f16 s8, s8
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov s8, r0
+; CHECK-NEXT:    vrintr.f16 s8, s8
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov s0, r0
+; CHECK-NEXT:    vrintr.f16 s0, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call fast <8 x half> @llvm.nearbyint.v8f16(<8 x half> %src)
+  ret <8 x half> %0
+}
+
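nearbyint is the one operation above with plain CHECK lines and no CHECK-MVEFP variant: since ISD::FNEARBYINT is marked Expand in ARMISelLowering.cpp, both RUN configurations produce the same scalarized code, built on the scalar VRINTR (round in the current mode without raising Inexact), for which MVE has no vector counterpart.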
+define arm_aapcs_vfpcc <4 x float> @ffloor_float32_t(<4 x float> %src) {
+; CHECK-MVE-LABEL: ffloor_float32_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vrintm.f32 s7, s3
+; CHECK-MVE-NEXT:    vrintm.f32 s6, s2
+; CHECK-MVE-NEXT:    vrintm.f32 s5, s1
+; CHECK-MVE-NEXT:    vrintm.f32 s4, s0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: ffloor_float32_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintm.f32 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <4 x float> @llvm.floor.v4f32(<4 x float> %src)
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @ffloor_float16_t(<8 x half> %src) {
+; CHECK-MVE-LABEL: ffloor_float16_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-MVE-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-MVE-NEXT:    vmov s4, r0
+; CHECK-MVE-NEXT:    vrintm.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r0, s4
+; CHECK-MVE-NEXT:    vmov s4, r1
+; CHECK-MVE-NEXT:    vrintm.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r1, s4
+; CHECK-MVE-NEXT:    vmov.16 q1[0], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-MVE-NEXT:    vmov.16 q1[1], r1
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintm.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[2], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintm.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[3], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintm.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[4], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintm.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[5], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrintm.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[6], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-MVE-NEXT:    vmov s0, r0
+; CHECK-MVE-NEXT:    vrintm.f16 s0, s0
+; CHECK-MVE-NEXT:    vmov r0, s0
+; CHECK-MVE-NEXT:    vmov.16 q1[7], r0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: ffloor_float16_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrintm.f16 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <8 x half> @llvm.floor.v8f16(<8 x half> %src)
+  ret <8 x half> %0
+}
+
+define arm_aapcs_vfpcc <4 x float> @fround_float32_t(<4 x float> %src) {
+; CHECK-MVE-LABEL: fround_float32_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vrinta.f32 s7, s3
+; CHECK-MVE-NEXT:    vrinta.f32 s6, s2
+; CHECK-MVE-NEXT:    vrinta.f32 s5, s1
+; CHECK-MVE-NEXT:    vrinta.f32 s4, s0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: fround_float32_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrinta.f32 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <4 x float> @llvm.round.v4f32(<4 x float> %src)
+  ret <4 x float> %0
+}
+
+define arm_aapcs_vfpcc <8 x half> @fround_float16_t(<8 x half> %src) {
+; CHECK-MVE-LABEL: fround_float16_t:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-MVE-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-MVE-NEXT:    vmov s4, r0
+; CHECK-MVE-NEXT:    vrinta.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r0, s4
+; CHECK-MVE-NEXT:    vmov s4, r1
+; CHECK-MVE-NEXT:    vrinta.f16 s4, s4
+; CHECK-MVE-NEXT:    vmov r1, s4
+; CHECK-MVE-NEXT:    vmov.16 q1[0], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-MVE-NEXT:    vmov.16 q1[1], r1
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrinta.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[2], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrinta.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[3], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrinta.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[4], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrinta.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[5], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-MVE-NEXT:    vmov s8, r0
+; CHECK-MVE-NEXT:    vrinta.f16 s8, s8
+; CHECK-MVE-NEXT:    vmov r0, s8
+; CHECK-MVE-NEXT:    vmov.16 q1[6], r0
+; CHECK-MVE-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-MVE-NEXT:    vmov s0, r0
+; CHECK-MVE-NEXT:    vrinta.f16 s0, s0
+; CHECK-MVE-NEXT:    vmov r0, s0
+; CHECK-MVE-NEXT:    vmov.16 q1[7], r0
+; CHECK-MVE-NEXT:    vmov q0, q1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-MVEFP-LABEL: fround_float16_t:
+; CHECK-MVEFP:       @ %bb.0: @ %entry
+; CHECK-MVEFP-NEXT:    vrinta.f16 q0, q0
+; CHECK-MVEFP-NEXT:    bx lr
+entry:
+  %0 = call fast <8 x half> @llvm.round.v8f16(<8 x half> %src)
+  ret <8 x half> %0
+}
+
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
+declare <4 x float> @llvm.rint.v4f32(<4 x float>)
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)
+declare <4 x float> @llvm.floor.v4f32(<4 x float>)
+declare <4 x float> @llvm.round.v4f32(<4 x float>)
+declare <8 x half> @llvm.ceil.v8f16(<8 x half>)
+declare <8 x half> @llvm.trunc.v8f16(<8 x half>)
+declare <8 x half> @llvm.rint.v8f16(<8 x half>)
+declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>)
+declare <8 x half> @llvm.floor.v8f16(<8 x half>)
+declare <8 x half> @llvm.round.v8f16(<8 x half>)
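As the NOTE at the top of the file says, these CHECK blocks are autogenerated; after any codegen change they should be refreshed with utils/update_llc_test_checks.py against a freshly built llc rather than edited by hand, along the lines of (invocation sketched from memory; exact flags may vary by checkout):

  utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc test/CodeGen/Thumb2/mve-frint.ll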