diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h @@ -201,7 +201,21 @@ bool &AllowPromotionWithoutCommonHeader); bool shouldExpandReduction(const IntrinsicInst *II) const { - return false; + switch (II->getIntrinsicID()) { + case Intrinsic::experimental_vector_reduce_v2_fadd: + case Intrinsic::experimental_vector_reduce_v2_fmul: + // We don't have legalization support for ordered FP reductions. + return !II->getFastMathFlags().allowReassoc(); + + case Intrinsic::experimental_vector_reduce_fmax: + case Intrinsic::experimental_vector_reduce_fmin: + // Lowering asserts that there are no NaNs. + return !II->getFastMathFlags().noNaNs(); + + default: + // Don't expand anything else, let legalization deal with it. + return false; + } } unsigned getGISelRematGlobalCost() const { diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h @@ -170,7 +170,16 @@ TTI::ReductionFlags Flags) const; bool shouldExpandReduction(const IntrinsicInst *II) const { - return false; + switch (II->getIntrinsicID()) { + case Intrinsic::experimental_vector_reduce_v2_fadd: + case Intrinsic::experimental_vector_reduce_v2_fmul: + // We don't have legalization support for ordered FP reductions. + return !II->getFastMathFlags().allowReassoc(); + + default: + // Don't expand anything else, let legalization deal with it. + return false; + } } int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll @@ -0,0 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK + +; Same as vecreduce-fadd-legalization.ll, but without fmf. 
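+;
+; Without the reassoc fast-math flag these are ordered (strict) reductions,
+; so shouldExpandReduction returns true and the ExpandReductions pass
+; rewrites each call into a sequential chain of scalar fadds before
+; instruction selection. As a rough sketch, for a hypothetical <4 x float>
+; input %v with start value %acc (illustrative names), the expansion is:
+;
+;   %e0 = extractelement <4 x float> %v, i32 0
+;   %a0 = fadd float %acc, %e0
+;   %e1 = extractelement <4 x float> %v, i32 1
+;   %a1 = fadd float %a0, %e1
+;   ... and so on through the last lane, strictly in lane order.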
+ +declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half, <1 x half>) +declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float, <1 x float>) +declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double, <1 x double>) +declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128, <1 x fp128>) + +declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float, <3 x float>) +declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128, <2 x fp128>) +declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float, <16 x float>) + +define half @test_v1f16(<1 x half> %a) nounwind { +; CHECK-LABEL: test_v1f16: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvt s0, h0 +; CHECK-NEXT: fmov s1, wzr +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: fcvt h0, s0 +; CHECK-NEXT: ret + %b = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half 0.0, <1 x half> %a) + ret half %b +} + +define float @test_v1f32(<1 x float> %a) nounwind { +; CHECK-LABEL: test_v1f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: fmov s1, wzr +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float 0.0, <1 x float> %a) + ret float %b +} + +define double @test_v1f64(<1 x double> %a) nounwind { +; CHECK-LABEL: test_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d1, xzr +; CHECK-NEXT: fadd d0, d0, d1 +; CHECK-NEXT: ret + %b = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double 0.0, <1 x double> %a) + ret double %b +} + +define fp128 @test_v1f128(<1 x fp128> %a) nounwind { +; CHECK-LABEL: test_v1f128: +; CHECK: // %bb.0: +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: bl __addtf3 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a) + ret fp128 %b +} + +define float @test_v3f32(<3 x float> %a) nounwind { +; CHECK-LABEL: test_v3f32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov s1, wzr +; CHECK-NEXT: mov s2, v0.s[1] +; CHECK-NEXT: fadd s1, s0, s1 +; CHECK-NEXT: fadd s1, s1, s2 +; CHECK-NEXT: mov s0, v0.s[2] +; CHECK-NEXT: fadd s0, s1, s0 +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float 0.0, <3 x float> %a) + ret float %b +} + +define fp128 @test_v2f128(<2 x fp128> %a) nounwind { +; CHECK-LABEL: test_v2f128: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: bl __addtf3 +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: bl __addtf3 +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a) + ret fp128 %b +} + +define float @test_v16f32(<16 x float> %a) nounwind { +; CHECK-LABEL: test_v16f32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov s4, wzr +; CHECK-NEXT: mov s5, v0.s[1] +; CHECK-NEXT: fadd s4, s0, s4 +; CHECK-NEXT: fadd s4, s4, s5 +; CHECK-NEXT: mov s5, v0.s[2] +; CHECK-NEXT: mov s0, v0.s[3] +; CHECK-NEXT: fadd s4, s4, s5 +; CHECK-NEXT: fadd s0, s4, s0 +; CHECK-NEXT: mov s5, v1.s[1] 
+; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mov s4, v1.s[2] +; CHECK-NEXT: fadd s0, s0, s5 +; CHECK-NEXT: mov s1, v1.s[3] +; CHECK-NEXT: fadd s0, s0, s4 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mov s5, v2.s[1] +; CHECK-NEXT: fadd s0, s0, s2 +; CHECK-NEXT: mov s4, v2.s[2] +; CHECK-NEXT: fadd s0, s0, s5 +; CHECK-NEXT: mov s1, v2.s[3] +; CHECK-NEXT: fadd s0, s0, s4 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mov s2, v3.s[1] +; CHECK-NEXT: fadd s0, s0, s3 +; CHECK-NEXT: mov s5, v3.s[2] +; CHECK-NEXT: fadd s0, s0, s2 +; CHECK-NEXT: fadd s0, s0, s5 +; CHECK-NEXT: mov s1, v3.s[3] +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float 0.0, <16 x float> %a) + ret float %b +} diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization-nan.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization-nan.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization-nan.ll @@ -0,0 +1,88 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK + +declare half @llvm.experimental.vector.reduce.fmax.v1f16(<1 x half> %a) +declare float @llvm.experimental.vector.reduce.fmax.v1f32(<1 x float> %a) +declare double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %a) +declare fp128 @llvm.experimental.vector.reduce.fmax.v1f128(<1 x fp128> %a) + +declare float @llvm.experimental.vector.reduce.fmax.v3f32(<3 x float> %a) +declare fp128 @llvm.experimental.vector.reduce.fmax.v2f128(<2 x fp128> %a) +declare float @llvm.experimental.vector.reduce.fmax.v16f32(<16 x float> %a) + +define half @test_v1f16(<1 x half> %a) nounwind { +; CHECK-LABEL: test_v1f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ret + %b = call half @llvm.experimental.vector.reduce.fmax.v1f16(<1 x half> %a) + ret half %b +} + +define float @test_v1f32(<1 x float> %a) nounwind { +; CHECK-LABEL: test_v1f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.fmax.v1f32(<1 x float> %a) + ret float %b +} + +define double @test_v1f64(<1 x double> %a) nounwind { +; CHECK-LABEL: test_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ret + %b = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %a) + ret double %b +} + +define fp128 @test_v1f128(<1 x fp128> %a) nounwind { +; CHECK-LABEL: test_v1f128: +; CHECK: // %bb.0: +; CHECK-NEXT: ret + %b = call fp128 @llvm.experimental.vector.reduce.fmax.v1f128(<1 x fp128> %a) + ret fp128 %b +} + +; TODO: This doesn't work, because ExpandReductions only supports power of two +; unordered reductions. 
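+; The shuffle-based expansion repeatedly halves the vector (roughly, at each
+; step it shuffles the high half down and applies fmax lane-wise), so it only
+; applies when the element count is a power of two; a v3f32 input would first
+; have to be widened.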
+;define float @test_v3f32(<3 x float> %a) nounwind { +; %b = call float @llvm.experimental.vector.reduce.fmax.v3f32(<3 x float> %a) +; ret float %b +;} + +define fp128 @test_v2f128(<2 x fp128> %a) nounwind { +; CHECK-LABEL: test_v2f128: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; CHECK-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill +; CHECK-NEXT: bl __gttf2 +; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: b.le .LBB4_2 +; CHECK-NEXT: // %bb.1: +; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: .LBB4_2: +; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: ret + %b = call fp128 @llvm.experimental.vector.reduce.fmax.v2f128(<2 x fp128> %a) + ret fp128 %b +} + +define float @test_v16f32(<16 x float> %a) nounwind { +; CHECK-LABEL: test_v16f32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmaxnm v1.4s, v1.4s, v3.4s +; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v2.4s +; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v1.4s +; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v1.4s +; CHECK-NEXT: dup v1.4s, v0.s[1] +; CHECK-NEXT: fmaxnm v0.4s, v0.4s, v1.4s +; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.fmax.v16f32(<16 x float> %a) + ret float %b +} diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll @@ -0,0 +1,114 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK + +; Same as vecreduce-fmul-legalization.ll, but without fmf. 
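+;
+; As with the strict fadd tests, the missing reassoc flag makes these ordered
+; reductions, so they are expanded into a sequential scalar chain, roughly
+; ((((%acc * v0) * v1) * v2) * ...), one fmul per lane in lane order.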
+ +declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half, <1 x half>) +declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float, <1 x float>) +declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double, <1 x double>) +declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128, <1 x fp128>) + +declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float, <3 x float>) +declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128, <2 x fp128>) +declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float, <16 x float>) + +define half @test_v1f16(<1 x half> %a) nounwind { +; CHECK-LABEL: test_v1f16: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvt s0, h0 +; CHECK-NEXT: fmov s1, wzr +; CHECK-NEXT: fmul s0, s0, s1 +; CHECK-NEXT: fcvt h0, s0 +; CHECK-NEXT: ret + %b = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half 0.0, <1 x half> %a) + ret half %b +} + +define float @test_v1f32(<1 x float> %a) nounwind { +; CHECK-LABEL: test_v1f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: fmov s1, wzr +; CHECK-NEXT: fmul s0, s1, v0.s[0] +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float 0.0, <1 x float> %a) + ret float %b +} + +define double @test_v1f64(<1 x double> %a) nounwind { +; CHECK-LABEL: test_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d1, xzr +; CHECK-NEXT: fmul d0, d0, d1 +; CHECK-NEXT: ret + %b = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double 0.0, <1 x double> %a) + ret double %b +} + +define fp128 @test_v1f128(<1 x fp128> %a) nounwind { +; CHECK-LABEL: test_v1f128: +; CHECK: // %bb.0: +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: bl __multf3 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a) + ret fp128 %b +} + +define float @test_v3f32(<3 x float> %a) nounwind { +; CHECK-LABEL: test_v3f32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov s1, wzr +; CHECK-NEXT: fmul s1, s1, v0.s[0] +; CHECK-NEXT: fmul s1, s1, v0.s[1] +; CHECK-NEXT: fmul s0, s1, v0.s[2] +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float 0.0, <3 x float> %a) + ret float %b +} + +define fp128 @test_v2f128(<2 x fp128> %a) nounwind { +; CHECK-LABEL: test_v2f128: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: bl __multf3 +; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-NEXT: bl __multf3 +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: ret + %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a) + ret fp128 %b +} + +define float @test_v16f32(<16 x float> %a) nounwind { +; CHECK-LABEL: test_v16f32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov s4, wzr +; CHECK-NEXT: fmul s4, s4, v0.s[0] +; CHECK-NEXT: fmul s4, s4, v0.s[1] +; CHECK-NEXT: fmul s4, s4, v0.s[2] +; CHECK-NEXT: fmul s0, s4, v0.s[3] +; CHECK-NEXT: fmul s0, s0, v1.s[0] +; CHECK-NEXT: fmul s0, s0, v1.s[1] +; CHECK-NEXT: fmul s0, s0, v1.s[2] +; CHECK-NEXT: fmul s0, s0, v1.s[3] 
+; CHECK-NEXT: fmul s0, s0, v2.s[0] +; CHECK-NEXT: fmul s0, s0, v2.s[1] +; CHECK-NEXT: fmul s0, s0, v2.s[2] +; CHECK-NEXT: fmul s0, s0, v2.s[3] +; CHECK-NEXT: fmul s0, s0, v3.s[0] +; CHECK-NEXT: fmul s0, s0, v3.s[1] +; CHECK-NEXT: fmul s0, s0, v3.s[2] +; CHECK-NEXT: fmul s0, s0, v3.s[3] +; CHECK-NEXT: ret + %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float 0.0, <16 x float> %a) + ret float %b +} diff --git a/llvm/test/CodeGen/ARM/vecreduce-fadd-legalization-strict.ll b/llvm/test/CodeGen/ARM/vecreduce-fadd-legalization-strict.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/ARM/vecreduce-fadd-legalization-strict.ll @@ -0,0 +1,166 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+neon | FileCheck %s --check-prefix=CHECK + +declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half, <1 x half>) +declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float, <1 x float>) +declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double, <1 x double>) +declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128, <1 x fp128>) + +declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float, <3 x float>) +declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128, <2 x fp128>) +declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float, <16 x float>) + +define half @test_v1f16(<1 x half> %a) nounwind { +; CHECK-LABEL: test_v1f16: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl __aeabi_f2h +; CHECK-NEXT: bl __aeabi_h2f +; CHECK-NEXT: vldr s0, .LCPI0_0 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vadd.f32 s0, s2, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: pop {r11, lr} +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI0_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half 0.0, <1 x half> %a) + ret half %b +} + +define float @test_v1f32(<1 x float> %a) nounwind { +; CHECK-LABEL: test_v1f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s0, .LCPI1_0 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vadd.f32 s0, s2, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI1_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float 0.0, <1 x float> %a) + ret float %b +} + +define double @test_v1f64(<1 x double> %a) nounwind { +; CHECK-LABEL: test_v1f64: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.i32 d16, #0x0 +; CHECK-NEXT: vmov d17, r0, r1 +; CHECK-NEXT: vadd.f64 d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: mov pc, lr + %b = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double 0.0, <1 x double> %a) + ret double %b +} + +define fp128 @test_v1f128(<1 x fp128> %a) nounwind { +; CHECK-LABEL: test_v1f128: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: mov r12, #0 +; CHECK-NEXT: str r12, [sp] +; CHECK-NEXT: str r12, [sp, #4] +; CHECK-NEXT: str r12, [sp, #8] +; CHECK-NEXT: str r12, [sp, #12] +; CHECK-NEXT: bl __addtf3 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: pop {r11, lr} +; CHECK-NEXT: mov pc, lr + %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a) + ret fp128 %b +} + 
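+; Note: the sequential expansion used for ordered reductions does not need a
+; power-of-two element count, so unlike the unordered fmax case above, a
+; v3f32 reduction can be handled here.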
+define float @test_v3f32(<3 x float> %a) nounwind { +; CHECK-LABEL: test_v3f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov d3, r2, r3 +; CHECK-NEXT: vldr s0, .LCPI4_0 +; CHECK-NEXT: vmov d2, r0, r1 +; CHECK-NEXT: vadd.f32 s0, s4, s0 +; CHECK-NEXT: vadd.f32 s0, s0, s5 +; CHECK-NEXT: vadd.f32 s0, s0, s6 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI4_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float 0.0, <3 x float> %a) + ret float %b +} + +define fp128 @test_v2f128(<2 x fp128> %a) nounwind { +; CHECK-LABEL: test_v2f128: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, r5, r11, lr} +; CHECK-NEXT: push {r4, r5, r11, lr} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: mov r12, #0 +; CHECK-NEXT: str r12, [sp] +; CHECK-NEXT: str r12, [sp, #4] +; CHECK-NEXT: str r12, [sp, #8] +; CHECK-NEXT: str r12, [sp, #12] +; CHECK-NEXT: bl __addtf3 +; CHECK-NEXT: ldr r12, [sp, #36] +; CHECK-NEXT: ldr lr, [sp, #32] +; CHECK-NEXT: ldr r4, [sp, #40] +; CHECK-NEXT: ldr r5, [sp, #44] +; CHECK-NEXT: str lr, [sp] +; CHECK-NEXT: str r12, [sp, #4] +; CHECK-NEXT: str r4, [sp, #8] +; CHECK-NEXT: str r5, [sp, #12] +; CHECK-NEXT: bl __addtf3 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: pop {r4, r5, r11, lr} +; CHECK-NEXT: mov pc, lr + %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a) + ret fp128 %b +} + +define float @test_v16f32(<16 x float> %a) nounwind { +; CHECK-LABEL: test_v16f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov d3, r2, r3 +; CHECK-NEXT: vldr s0, .LCPI6_0 +; CHECK-NEXT: vmov d2, r0, r1 +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vadd.f32 s0, s4, s0 +; CHECK-NEXT: vadd.f32 s0, s0, s5 +; CHECK-NEXT: vadd.f32 s0, s0, s6 +; CHECK-NEXT: vadd.f32 s0, s0, s7 +; CHECK-NEXT: vld1.64 {d2, d3}, [r0] +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vadd.f32 s0, s0, s4 +; CHECK-NEXT: vadd.f32 s0, s0, s5 +; CHECK-NEXT: vadd.f32 s0, s0, s6 +; CHECK-NEXT: vadd.f32 s0, s0, s7 +; CHECK-NEXT: vld1.64 {d2, d3}, [r0] +; CHECK-NEXT: add r0, sp, #32 +; CHECK-NEXT: vadd.f32 s0, s0, s4 +; CHECK-NEXT: vadd.f32 s0, s0, s5 +; CHECK-NEXT: vadd.f32 s0, s0, s6 +; CHECK-NEXT: vadd.f32 s0, s0, s7 +; CHECK-NEXT: vld1.64 {d2, d3}, [r0] +; CHECK-NEXT: vadd.f32 s0, s0, s4 +; CHECK-NEXT: vadd.f32 s0, s0, s5 +; CHECK-NEXT: vadd.f32 s0, s0, s6 +; CHECK-NEXT: vadd.f32 s0, s0, s7 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI6_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float 0.0, <16 x float> %a) + ret float %b +} diff --git a/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll @@ -0,0 +1,166 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+neon | FileCheck %s --check-prefix=CHECK + +declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half, <1 x half>) +declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float, <1 x float>) +declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double, <1 x double>) +declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128, <1 x fp128>) + +declare float 
@llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float, <3 x float>) +declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128, <2 x fp128>) +declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float, <16 x float>) + +define half @test_v1f16(<1 x half> %a) nounwind { +; CHECK-LABEL: test_v1f16: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl __aeabi_f2h +; CHECK-NEXT: bl __aeabi_h2f +; CHECK-NEXT: vldr s0, .LCPI0_0 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vmul.f32 s0, s2, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: pop {r11, lr} +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI0_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half 0.0, <1 x half> %a) + ret half %b +} + +define float @test_v1f32(<1 x float> %a) nounwind { +; CHECK-LABEL: test_v1f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s0, .LCPI1_0 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vmul.f32 s0, s2, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI1_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float 0.0, <1 x float> %a) + ret float %b +} + +define double @test_v1f64(<1 x double> %a) nounwind { +; CHECK-LABEL: test_v1f64: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.i32 d16, #0x0 +; CHECK-NEXT: vmov d17, r0, r1 +; CHECK-NEXT: vmul.f64 d16, d17, d16 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: mov pc, lr + %b = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double 0.0, <1 x double> %a) + ret double %b +} + +define fp128 @test_v1f128(<1 x fp128> %a) nounwind { +; CHECK-LABEL: test_v1f128: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: mov r12, #0 +; CHECK-NEXT: str r12, [sp] +; CHECK-NEXT: str r12, [sp, #4] +; CHECK-NEXT: str r12, [sp, #8] +; CHECK-NEXT: str r12, [sp, #12] +; CHECK-NEXT: bl __multf3 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: pop {r11, lr} +; CHECK-NEXT: mov pc, lr + %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a) + ret fp128 %b +} + +define float @test_v3f32(<3 x float> %a) nounwind { +; CHECK-LABEL: test_v3f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov d3, r2, r3 +; CHECK-NEXT: vldr s0, .LCPI4_0 +; CHECK-NEXT: vmov d2, r0, r1 +; CHECK-NEXT: vmul.f32 s0, s4, s0 +; CHECK-NEXT: vmul.f32 s0, s0, s5 +; CHECK-NEXT: vmul.f32 s0, s0, s6 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI4_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float 0.0, <3 x float> %a) + ret float %b +} + +define fp128 @test_v2f128(<2 x fp128> %a) nounwind { +; CHECK-LABEL: test_v2f128: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, r5, r11, lr} +; CHECK-NEXT: push {r4, r5, r11, lr} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: mov r12, #0 +; CHECK-NEXT: str r12, [sp] +; CHECK-NEXT: str r12, [sp, #4] +; CHECK-NEXT: str r12, [sp, #8] +; CHECK-NEXT: str r12, [sp, #12] +; CHECK-NEXT: bl __multf3 +; CHECK-NEXT: ldr r12, [sp, #36] +; CHECK-NEXT: ldr lr, [sp, #32] +; CHECK-NEXT: ldr r4, [sp, #40] +; CHECK-NEXT: ldr r5, [sp, #44] +; CHECK-NEXT: str lr, [sp] +; CHECK-NEXT: str r12, [sp, #4] +; CHECK-NEXT: str r4, [sp, 
#8] +; CHECK-NEXT: str r5, [sp, #12] +; CHECK-NEXT: bl __multf3 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: pop {r4, r5, r11, lr} +; CHECK-NEXT: mov pc, lr + %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a) + ret fp128 %b +} + +define float @test_v16f32(<16 x float> %a) nounwind { +; CHECK-LABEL: test_v16f32: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov d3, r2, r3 +; CHECK-NEXT: vldr s0, .LCPI6_0 +; CHECK-NEXT: vmov d2, r0, r1 +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vmul.f32 s0, s4, s0 +; CHECK-NEXT: vmul.f32 s0, s0, s5 +; CHECK-NEXT: vmul.f32 s0, s0, s6 +; CHECK-NEXT: vmul.f32 s0, s0, s7 +; CHECK-NEXT: vld1.64 {d2, d3}, [r0] +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vmul.f32 s0, s0, s4 +; CHECK-NEXT: vmul.f32 s0, s0, s5 +; CHECK-NEXT: vmul.f32 s0, s0, s6 +; CHECK-NEXT: vmul.f32 s0, s0, s7 +; CHECK-NEXT: vld1.64 {d2, d3}, [r0] +; CHECK-NEXT: add r0, sp, #32 +; CHECK-NEXT: vmul.f32 s0, s0, s4 +; CHECK-NEXT: vmul.f32 s0, s0, s5 +; CHECK-NEXT: vmul.f32 s0, s0, s6 +; CHECK-NEXT: vmul.f32 s0, s0, s7 +; CHECK-NEXT: vld1.64 {d2, d3}, [r0] +; CHECK-NEXT: vmul.f32 s0, s0, s4 +; CHECK-NEXT: vmul.f32 s0, s0, s5 +; CHECK-NEXT: vmul.f32 s0, s0, s6 +; CHECK-NEXT: vmul.f32 s0, s0, s7 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: mov pc, lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI6_0: +; CHECK-NEXT: .long 0 @ float 0 + %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float 0.0, <16 x float> %a) + ret float %b +}