Index: llvm/lib/CodeGen/ExpandReductions.cpp
===================================================================
--- llvm/lib/CodeGen/ExpandReductions.cpp
+++ llvm/lib/CodeGen/ExpandReductions.cpp
@@ -85,6 +85,13 @@
       default: break;
       case Intrinsic::experimental_vector_reduce_v2_fadd:
      case Intrinsic::experimental_vector_reduce_v2_fmul:
+        if (!II->getFastMathFlags().allowReassoc()) {
+          // The backend currently doesn't support legalizing ordered
+          // vector reductions, so force them to be expanded now.
+          Worklist.push_back(II);
+          break;
+        }
+        LLVM_FALLTHROUGH;
       case Intrinsic::experimental_vector_reduce_add:
       case Intrinsic::experimental_vector_reduce_mul:
       case Intrinsic::experimental_vector_reduce_and:
Index: llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
@@ -0,0 +1,128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+; Same as vecreduce-fadd-legalization.ll, but without fmf.
+
+declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half, <1 x half>)
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float, <1 x float>)
+declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double, <1 x double>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128, <1 x fp128>)
+
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float, <3 x float>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128, <2 x fp128>)
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float, <16 x float>)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    fcvt h0, s0
+; CHECK-NEXT:    ret
+  %b = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half 0.0, <1 x half> %a)
+  ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float 0.0, <1 x float> %a)
+  ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d1, xzr
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ret
+  %b = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double 0.0, <1 x double> %a)
+  ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    mov s2, v0.s[1]
+; CHECK-NEXT:    fadd s1, s0, s1
+; CHECK-NEXT:    fadd s1, s1, s2
+; CHECK-NEXT:    mov s0, v0.s[2]
+; CHECK-NEXT:    fadd s0, s1, s0
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float 0.0, <3 x float> %a)
+  ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    adrp x8, .LCPI5_0
+; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s4, wzr
+; CHECK-NEXT:    mov s5, v0.s[1]
+; CHECK-NEXT:    fadd s4, s0, s4
+; CHECK-NEXT:    fadd s4, s4, s5
+; CHECK-NEXT:    mov s5, v0.s[2]
+; CHECK-NEXT:    mov s0, v0.s[3]
+; CHECK-NEXT:    fadd s4, s4, s5
+; CHECK-NEXT:    fadd s0, s4, s0
+; CHECK-NEXT:    mov s5, v1.s[1]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov s4, v1.s[2]
+; CHECK-NEXT:    fadd s0, s0, s5
+; CHECK-NEXT:    mov s1, v1.s[3]
+; CHECK-NEXT:    fadd s0, s0, s4
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov s5, v2.s[1]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    mov s4, v2.s[2]
+; CHECK-NEXT:    fadd s0, s0, s5
+; CHECK-NEXT:    mov s1, v2.s[3]
+; CHECK-NEXT:    fadd s0, s0, s4
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov s2, v3.s[1]
+; CHECK-NEXT:    fadd s0, s0, s3
+; CHECK-NEXT:    mov s5, v3.s[2]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    fadd s0, s0, s5
+; CHECK-NEXT:    mov s1, v3.s[3]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float 0.0, <16 x float> %a)
+  ret float %b
+}
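
; For reference: with the reassoc flag absent, ExpandReductions has to keep
; the strict left-to-right evaluation order of the reduction, so it rewrites
; the intrinsic into a sequential chain of scalar fadds instead of the
; shuffle-based tree reduction used on the fast-math path. Below is a minimal
; sketch of the shape of that expansion for a v4f32 reduction; the function
; and value names are illustrative, not actual pass output.
define float @ordered_fadd_v4f32(float %acc, <4 x float> %v) {
  %e0 = extractelement <4 x float> %v, i32 0
  %r0 = fadd float %acc, %e0
  %e1 = extractelement <4 x float> %v, i32 1
  %r1 = fadd float %r0, %e1
  %e2 = extractelement <4 x float> %v, i32 2
  %r2 = fadd float %r1, %e2
  %e3 = extractelement <4 x float> %v, i32 3
  %r3 = fadd float %r2, %e3
  ret float %r3
}
; The scalar fadd chains in the CHECK lines above (see test_v16f32, for
; example) are the AArch64 lowering of exactly this kind of sequential
; expansion.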