Index: llvm/lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- llvm/lib/Target/ARM/ARMISelLowering.cpp +++ llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -292,6 +292,7 @@ if (HasMVEFP) { setOperationAction(ISD::FMINNUM, VT, Legal); setOperationAction(ISD::FMAXNUM, VT, Legal); + setOperationAction(ISD::FROUND, VT, Legal); // No native support for these. setOperationAction(ISD::FDIV, VT, Expand); @@ -305,6 +306,7 @@ setOperationAction(ISD::FLOG10, VT, Expand); setOperationAction(ISD::FEXP, VT, Expand); setOperationAction(ISD::FEXP2, VT, Expand); + setOperationAction(ISD::FNEARBYINT, VT, Expand); } } Index: llvm/lib/Target/ARM/ARMInstrMVE.td =================================================================== --- llvm/lib/Target/ARM/ARMInstrMVE.td +++ llvm/lib/Target/ARM/ARMInstrMVE.td @@ -2318,6 +2318,29 @@ defm MVE_VRINTf16 : MVE_VRINT_ops<"f16", 0b01>; defm MVE_VRINTf32 : MVE_VRINT_ops<"f32", 0b10>; +let Predicates = [HasMVEFloat] in { + def : Pat<(v4f32 (frint (v4f32 MQPR:$val1))), + (v4f32 (MVE_VRINTf32X (v4f32 MQPR:$val1)))>; + def : Pat<(v8f16 (frint (v8f16 MQPR:$val1))), + (v8f16 (MVE_VRINTf16X (v8f16 MQPR:$val1)))>; + def : Pat<(v4f32 (fround (v4f32 MQPR:$val1))), + (v4f32 (MVE_VRINTf32A (v4f32 MQPR:$val1)))>; + def : Pat<(v8f16 (fround (v8f16 MQPR:$val1))), + (v8f16 (MVE_VRINTf16A (v8f16 MQPR:$val1)))>; + def : Pat<(v4f32 (ftrunc (v4f32 MQPR:$val1))), + (v4f32 (MVE_VRINTf32Z (v4f32 MQPR:$val1)))>; + def : Pat<(v8f16 (ftrunc (v8f16 MQPR:$val1))), + (v8f16 (MVE_VRINTf16Z (v8f16 MQPR:$val1)))>; + def : Pat<(v4f32 (ffloor (v4f32 MQPR:$val1))), + (v4f32 (MVE_VRINTf32M (v4f32 MQPR:$val1)))>; + def : Pat<(v8f16 (ffloor (v8f16 MQPR:$val1))), + (v8f16 (MVE_VRINTf16M (v8f16 MQPR:$val1)))>; + def : Pat<(v4f32 (fceil (v4f32 MQPR:$val1))), + (v4f32 (MVE_VRINTf32P (v4f32 MQPR:$val1)))>; + def : Pat<(v8f16 (fceil (v8f16 MQPR:$val1))), + (v8f16 (MVE_VRINTf16P (v8f16 MQPR:$val1)))>; +} + class MVEFloatArithNeon<string iname, string suffix, bit size, dag oops, dag iops, string ops, vpred_ops vpred, string cstr, list<dag> pattern=[]>
Index: llvm/test/CodeGen/Thumb2/mve-fmath.ll =================================================================== --- llvm/test/CodeGen/Thumb2/mve-fmath.ll +++ llvm/test/CodeGen/Thumb2/mve-fmath.ll @@ -1165,6 +1165,181 @@ ret <8 x half> %0 } +define arm_aapcs_vfpcc <4 x float> @copysign_float32_t(<4 x float> %src1, <4 x float> %src2) { +; CHECK-LABEL: copysign_float32_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: .pad #32 +; CHECK-NEXT: sub sp, #32 +; CHECK-NEXT: vstr s5, [sp, #8] +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: vstr s6, [sp, #16] +; CHECK-NEXT: ldr.w lr, [sp, #16] +; CHECK-NEXT: vstr s7, [sp, #24] +; CHECK-NEXT: lsr.w r2, r12, #31 +; CHECK-NEXT: ldr r6, [sp, #24] +; CHECK-NEXT: vstr s3, [sp, #28] +; CHECK-NEXT: ldr r3, [sp, #28] +; CHECK-NEXT: vstr s4, [sp] +; CHECK-NEXT: ldr r0, [sp] +; CHECK-NEXT: vstr s0, [sp, #4] +; CHECK-NEXT: ldr r1, [sp, #4] +; CHECK-NEXT: vstr s1, [sp, #12] +; CHECK-NEXT: lsrs r0, r0, #31 +; CHECK-NEXT: vstr s2, [sp, #20] +; CHECK-NEXT: bfi r1, r0, #31, #1 +; CHECK-NEXT: ldr r4, [sp, #12] +; CHECK-NEXT: ldr r5, [sp, #20] +; CHECK-NEXT: bfi r4, r2, #31, #1 +; CHECK-NEXT: lsr.w r2, lr, #31 +; CHECK-NEXT: bfi r5, r2, #31, #1 +; CHECK-NEXT: lsrs r2, r6, #31 +; CHECK-NEXT: bfi r3, r2, #31, #1 +; CHECK-NEXT: vmov s3, r3 +; CHECK-NEXT: vmov s2, r5 +; CHECK-NEXT: vmov s1, r4 +; CHECK-NEXT: vmov s0, r1 +; CHECK-NEXT: add sp, #32 +; CHECK-NEXT: pop {r4, r5, r6, pc} +entry: + %0 = call fast <4 x float> @llvm.copysign.v4f32(<4 x float> %src1, <4 x float> %src2) + ret <4 x float> %0 +} + +define arm_aapcs_vfpcc <8 x half> @copysign_float16_t(<8 x half> %src1, <8 x half> %src2) { +; CHECK-LABEL: copysign_float16_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #32 +; CHECK-NEXT: sub sp, #32 +; CHECK-NEXT: vmov.u16 r0, q1[1] +; CHECK-NEXT: vmov.u16 r1, q0[0] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vmov.u16 r0, q1[0] +; CHECK-NEXT: vstr.16 s8, [sp, #24] +; 
CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vmov.u16 r0, q1[2] +; CHECK-NEXT: vstr.16 s8, [sp, #28] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vmov.u16 r0, q1[3] +; CHECK-NEXT: vstr.16 s8, [sp, #20] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vmov.u16 r0, q1[4] +; CHECK-NEXT: vstr.16 s8, [sp, #16] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vmov.u16 r0, q1[5] +; CHECK-NEXT: vstr.16 s8, [sp, #12] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vmov.u16 r0, q1[6] +; CHECK-NEXT: vstr.16 s8, [sp, #8] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vmov.u16 r0, q1[7] +; CHECK-NEXT: vmov s4, r0 +; CHECK-NEXT: vstr.16 s8, [sp, #4] +; CHECK-NEXT: vstr.16 s4, [sp] +; CHECK-NEXT: vmov.u16 r0, q0[1] +; CHECK-NEXT: vmov s4, r0 +; CHECK-NEXT: ldrb.w r0, [sp, #25] +; CHECK-NEXT: vabs.f16 s4, s4 +; CHECK-NEXT: ands r0, r0, #128 +; CHECK-NEXT: vneg.f16 s6, s4 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vseleq.f16 s4, s4, s6 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: vmov s4, r1 +; CHECK-NEXT: ldrb.w r1, [sp, #29] +; CHECK-NEXT: vabs.f16 s4, s4 +; CHECK-NEXT: ands r1, r1, #128 +; CHECK-NEXT: vneg.f16 s6, s4 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r1, #1 +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vseleq.f16 s4, s4, s6 +; CHECK-NEXT: vmov r1, s4 +; CHECK-NEXT: vmov.16 q1[0], r1 +; CHECK-NEXT: vmov.16 q1[1], r0 +; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: ldrb.w r0, [sp, #21] +; CHECK-NEXT: vabs.f16 s8, s8 +; CHECK-NEXT: ands r0, r0, #128 +; CHECK-NEXT: vneg.f16 s10, s8 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vseleq.f16 s8, s8, s10 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[2], r0 +; CHECK-NEXT: vmov.u16 r0, q0[3] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: ldrb.w r0, [sp, #17] +; CHECK-NEXT: vabs.f16 s8, s8 +; CHECK-NEXT: ands r0, r0, #128 +; CHECK-NEXT: vneg.f16 s10, s8 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vseleq.f16 s8, 
s8, s10 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[3], r0 +; CHECK-NEXT: vmov.u16 r0, q0[4] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: ldrb.w r0, [sp, #13] +; CHECK-NEXT: vabs.f16 s8, s8 +; CHECK-NEXT: ands r0, r0, #128 +; CHECK-NEXT: vneg.f16 s10, s8 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vseleq.f16 s8, s8, s10 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[4], r0 +; CHECK-NEXT: vmov.u16 r0, q0[5] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: ldrb.w r0, [sp, #9] +; CHECK-NEXT: vabs.f16 s8, s8 +; CHECK-NEXT: ands r0, r0, #128 +; CHECK-NEXT: vneg.f16 s10, s8 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vseleq.f16 s8, s8, s10 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[5], r0 +; CHECK-NEXT: vmov.u16 r0, q0[6] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: ldrb.w r0, [sp, #5] +; CHECK-NEXT: vabs.f16 s8, s8 +; CHECK-NEXT: ands r0, r0, #128 +; CHECK-NEXT: vneg.f16 s10, s8 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vseleq.f16 s8, s8, s10 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[6], r0 +; CHECK-NEXT: vmov.u16 r0, q0[7] +; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: ldrb.w r0, [sp, #1] +; CHECK-NEXT: vabs.f16 s0, s0 +; CHECK-NEXT: ands r0, r0, #128 +; CHECK-NEXT: vneg.f16 s2, s0 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vseleq.f16 s0, s0, s2 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: vmov.16 q1[7], r0 +; CHECK-NEXT: vmov q0, q1 +; CHECK-NEXT: add sp, #32 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <8 x half> @llvm.copysign.v8f16(<8 x half> %src1, <8 x half> %src2) + ret <8 x half> %0 +} + declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) declare <4 x float> @llvm.cos.v4f32(<4 x float>) declare <4 x float> @llvm.sin.v4f32(<4 x float>) @@ -1174,6 +1349,7 @@ declare <4 x float> @llvm.log2.v4f32(<4 x float>) declare <4 x float> @llvm.log10.v4f32(<4 x float>) declare 
<4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>) +declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) declare <8 x half> @llvm.sqrt.v8f16(<8 x half>) declare <8 x half> @llvm.cos.v8f16(<8 x half>) declare <8 x half> @llvm.sin.v8f16(<8 x half>) @@ -1183,4 +1359,5 @@ declare <8 x half> @llvm.log2.v8f16(<8 x half>) declare <8 x half> @llvm.log10.v8f16(<8 x half>) declare <8 x half> @llvm.pow.v8f16(<8 x half>, <8 x half>) +declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>) Index: llvm/test/CodeGen/Thumb2/mve-frint.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/Thumb2/mve-frint.ll @@ -0,0 +1,179 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s + +define arm_aapcs_vfpcc <4 x float> @fceil_float32_t(<4 x float> %src) { +; CHECK-LABEL: fceil_float32_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintp.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <4 x float> @llvm.ceil.v4f32(<4 x float> %src) + ret <4 x float> %0 +} + +define arm_aapcs_vfpcc <8 x half> @fceil_float16_t(<8 x half> %src) { +; CHECK-LABEL: fceil_float16_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintp.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <8 x half> @llvm.ceil.v8f16(<8 x half> %src) + ret <8 x half> %0 +} + +define arm_aapcs_vfpcc <4 x float> @ftrunc_float32_t(<4 x float> %src) { +; CHECK-LABEL: ftrunc_float32_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintz.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <4 x float> @llvm.trunc.v4f32(<4 x float> %src) + ret <4 x float> %0 +} + +define arm_aapcs_vfpcc <8 x half> @ftrunc_float16_t(<8 x half> %src) { +; CHECK-LABEL: ftrunc_float16_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintz.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <8 x half> @llvm.trunc.v8f16(<8 x half> %src) + ret <8 x half> %0 
+} + +define arm_aapcs_vfpcc <4 x float> @frint_float32_t(<4 x float> %src) { +; CHECK-LABEL: frint_float32_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintx.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <4 x float> @llvm.rint.v4f32(<4 x float> %src) + ret <4 x float> %0 +} + +define arm_aapcs_vfpcc <8 x half> @frint_float16_t(<8 x half> %src) { +; CHECK-LABEL: frint_float16_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintx.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <8 x half> @llvm.rint.v8f16(<8 x half> %src) + ret <8 x half> %0 +} + +define arm_aapcs_vfpcc <4 x float> @fnearbyint_float32_t(<4 x float> %src) { +; CHECK-LABEL: fnearbyint_float32_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintr.f32 s7, s3 +; CHECK-NEXT: vrintr.f32 s6, s2 +; CHECK-NEXT: vrintr.f32 s5, s1 +; CHECK-NEXT: vrintr.f32 s4, s0 +; CHECK-NEXT: vmov q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <4 x float> @llvm.nearbyint.v4f32(<4 x float> %src) + ret <4 x float> %0 +} + +define arm_aapcs_vfpcc <8 x half> @fnearbyint_float16_t(<8 x half> %src) { +; CHECK-LABEL: fnearbyint_float16_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.u16 r0, q0[0] +; CHECK-NEXT: vmov.u16 r1, q0[1] +; CHECK-NEXT: vmov s4, r0 +; CHECK-NEXT: vrintr.f16 s4, s4 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: vmov s4, r1 +; CHECK-NEXT: vrintr.f16 s4, s4 +; CHECK-NEXT: vmov r1, s4 +; CHECK-NEXT: vmov.16 q1[0], r0 +; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: vmov.16 q1[1], r1 +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vrintr.f16 s8, s8 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[2], r0 +; CHECK-NEXT: vmov.u16 r0, q0[3] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vrintr.f16 s8, s8 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[3], r0 +; CHECK-NEXT: vmov.u16 r0, q0[4] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vrintr.f16 s8, s8 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[4], r0 +; CHECK-NEXT: vmov.u16 r0, q0[5] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vrintr.f16 
s8, s8 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[5], r0 +; CHECK-NEXT: vmov.u16 r0, q0[6] +; CHECK-NEXT: vmov s8, r0 +; CHECK-NEXT: vrintr.f16 s8, s8 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov.16 q1[6], r0 +; CHECK-NEXT: vmov.u16 r0, q0[7] +; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: vrintr.f16 s0, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: vmov.16 q1[7], r0 +; CHECK-NEXT: vmov q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <8 x half> @llvm.nearbyint.v8f16(<8 x half> %src) + ret <8 x half> %0 +} + +define arm_aapcs_vfpcc <4 x float> @ffloor_float32_t(<4 x float> %src) { +; CHECK-LABEL: ffloor_float32_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintm.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <4 x float> @llvm.floor.v4f32(<4 x float> %src) + ret <4 x float> %0 +} + +define arm_aapcs_vfpcc <8 x half> @ffloor_float16_t(<8 x half> %src) { +; CHECK-LABEL: ffloor_float16_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrintm.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <8 x half> @llvm.floor.v8f16(<8 x half> %src) + ret <8 x half> %0 +} + +define arm_aapcs_vfpcc <4 x float> @fround_float32_t(<4 x float> %src) { +; CHECK-LABEL: fround_float32_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrinta.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <4 x float> @llvm.round.v4f32(<4 x float> %src) + ret <4 x float> %0 +} + +define arm_aapcs_vfpcc <8 x half> @fround_float16_t(<8 x half> %src) { +; CHECK-LABEL: fround_float16_t: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vrinta.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = call fast <8 x half> @llvm.round.v8f16(<8 x half> %src) + ret <8 x half> %0 +} + +declare <4 x float> @llvm.ceil.v4f32(<4 x float>) +declare <4 x float> @llvm.trunc.v4f32(<4 x float>) +declare <4 x float> @llvm.rint.v4f32(<4 x float>) +declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) +declare <4 x float> @llvm.floor.v4f32(<4 x float>) +declare <4 x float> @llvm.round.v4f32(<4 x float>) +declare <8 x 
half> @llvm.ceil.v8f16(<8 x half>) +declare <8 x half> @llvm.trunc.v8f16(<8 x half>) +declare <8 x half> @llvm.rint.v8f16(<8 x half>) +declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>) +declare <8 x half> @llvm.floor.v8f16(<8 x half>) +declare <8 x half> @llvm.round.v8f16(<8 x half>)