diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
@@ -0,0 +1,320 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+sve -o - | FileCheck %s
+
+target triple = "aarch64-arm-none-eabi"
+
+%"class.std::complex" = type { { double, double } }
+
+; Zero initialized reduction
+;
+; complex<double> x = 0.0 + 0.0i; // complex DP in C++
+; for (int i = 0; i < 100; ++i)
+;   x += a[i] * b[i];
+;
+define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
+; CHECK-LABEL: complex_mul_v2f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cntd x9
+; CHECK-NEXT:    mov w11, #100 // =0x64
+; CHECK-NEXT:    neg x10, x9
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    and x10, x10, x11
+; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    rdvl x11, #2
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:  .LBB0_1: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    add x12, x0, x8
+; CHECK-NEXT:    add x13, x1, x8
+; CHECK-NEXT:    ld1b { z2.b }, p1/z, [x0, x8]
+; CHECK-NEXT:    subs x10, x10, x9
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x12, #1, mul vl]
+; CHECK-NEXT:    ld1b { z4.b }, p1/z, [x1, x8]
+; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x13, #1, mul vl]
+; CHECK-NEXT:    add x8, x8, x11
+; CHECK-NEXT:    uzp2 z6.d, z2.d, z3.d
+; CHECK-NEXT:    uzp1 z2.d, z2.d, z3.d
+; CHECK-NEXT:    uzp1 z3.d, z4.d, z5.d
+; CHECK-NEXT:    fmla z0.d, p0/m, z3.d, z2.d
+; CHECK-NEXT:    fmla z1.d, p0/m, z3.d, z6.d
+; CHECK-NEXT:    uzp2 z3.d, z4.d, z5.d
+; CHECK-NEXT:    fmls z0.d, p0/m, z3.d, z6.d
+; CHECK-NEXT:    fmla z1.d, p0/m, z3.d, z2.d
+; CHECK-NEXT:    b.ne .LBB0_1
+; CHECK-NEXT:  // %bb.2: // %exit.block
+; CHECK-NEXT:    faddv d0, p0, z0.d
+; CHECK-NEXT:    faddv d1, p0, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 killed $z1
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.vscale.i64()
+  %1 = shl nuw nsw i64 %0, 1
+  %n.mod.vf = urem i64 100, %1
+  %n.vec = sub nuw nsw i64 100, %n.mod.vf
+  %2 = shl nuw nsw i64 %0, 5
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %lsr.iv31 = phi i64 [ %lsr.iv.next32, %vector.body ], [ %n.vec, %entry ]
+  %lsr.iv27 = phi i64 [ %lsr.iv.next28, %vector.body ], [ 0, %entry ]
+  %vec.phi = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %16, %vector.body ]
+  %vec.phi12 = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %14, %vector.body ]
+  %scevgep46 = getelementptr i8, ptr %a, i64 %lsr.iv27
+  %scevgep47 = getelementptr i8, ptr %b, i64 %lsr.iv27
+  %wide.vec = load <vscale x 4 x double>, ptr %scevgep46, align 8
+  %3 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec)
+  %4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %3, 0
+  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %3, 1
+  %wide.vec30 = load <vscale x 4 x double>, ptr %scevgep47, align 8
+  %6 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec30)
+  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %6, 0
+  %8 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %6, 1
+  %9 = fmul fast <vscale x 2 x double> %8, %4
+  %10 = fmul fast <vscale x 2 x double> %7, %5
+  %11 = fmul fast <vscale x 2 x double> %7, %4
+  %12 = fadd fast <vscale x 2 x double> %11, %vec.phi12
+  %13 = fmul fast <vscale x 2 x double> %8, %5
+  %14 = fsub fast <vscale x 2 x double> %12, %13
+  %15 = fadd fast <vscale x 2 x double> %10, %vec.phi
+  %16 = fadd fast <vscale x 2 x double> %15, %9
+  %lsr.iv.next28 = add i64 %lsr.iv27, %2
+  %lsr.iv.next32 = sub i64 %lsr.iv31, %1
+  %17 = icmp eq i64 %lsr.iv.next32, 0
+  br i1 %17, label %exit.block, label %vector.body
+
+exit.block:                                       ; preds = %vector.body
+  %18 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %14)
+  %19 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %16)
+  %.fca.0.0.insert = insertvalue %"class.std::complex" poison, double %18, 0, 0
+  %.fca.0.1.insert = insertvalue %"class.std::complex" %.fca.0.0.insert, double %19, 0, 1
+  ret %"class.std::complex" %.fca.0.1.insert
+}
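+
+; The vector body above is the usual complex multiply-accumulate expansion.
+; As a sketch (ar/ai and br/bi are illustrative names for the deinterleaved
+; real/imaginary lanes of a[i] and b[i], not values from this test):
+;
+;   x.real += ar * br - ai * bi;
+;   x.imag += ar * bi + ai * br;
+;
+; The uzp1/uzp2 pairs perform the deinterleave and the fmla/fmls sequence
+; performs the two accumulations.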
+
+; Fixed value initialized reduction
+;
+; complex<double> x = 2.0 + 1.0i; // complex DP in C++
+; for (int i = 0; i < 100; ++i)
+;   x += a[i] * b[i];
+;
+define %"class.std::complex" @complex_mul_nonzero_init_v2f64(ptr %a, ptr %b) {
+; CHECK-LABEL: complex_mul_nonzero_init_v2f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cntd x9
+; CHECK-NEXT:    fmov d0, #2.00000000
+; CHECK-NEXT:    neg x10, x9
+; CHECK-NEXT:    mov w11, #100 // =0x64
+; CHECK-NEXT:    fmov d1, #1.00000000
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    and x10, x10, x11
+; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    rdvl x11, #2
+; CHECK-NEXT:    sel z0.d, p0, z0.d, z2.d
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:  .LBB1_1: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    add x12, x0, x8
+; CHECK-NEXT:    add x13, x1, x8
+; CHECK-NEXT:    ld1b { z2.b }, p1/z, [x0, x8]
+; CHECK-NEXT:    subs x10, x10, x9
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x12, #1, mul vl]
+; CHECK-NEXT:    ld1b { z4.b }, p1/z, [x1, x8]
+; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x13, #1, mul vl]
+; CHECK-NEXT:    add x8, x8, x11
+; CHECK-NEXT:    uzp2 z6.d, z2.d, z3.d
+; CHECK-NEXT:    uzp1 z2.d, z2.d, z3.d
+; CHECK-NEXT:    uzp1 z3.d, z4.d, z5.d
+; CHECK-NEXT:    fmla z0.d, p0/m, z3.d, z2.d
+; CHECK-NEXT:    fmla z1.d, p0/m, z3.d, z6.d
+; CHECK-NEXT:    uzp2 z3.d, z4.d, z5.d
+; CHECK-NEXT:    fmls z0.d, p0/m, z3.d, z6.d
+; CHECK-NEXT:    fmla z1.d, p0/m, z3.d, z2.d
+; CHECK-NEXT:    b.ne .LBB1_1
+; CHECK-NEXT:  // %bb.2: // %exit.block
+; CHECK-NEXT:    faddv d0, p0, z0.d
+; CHECK-NEXT:    faddv d1, p0, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 killed $z1
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.vscale.i64()
+  %1 = shl nuw nsw i64 %0, 1
+  %n.mod.vf = urem i64 100, %1
+  %n.vec = sub nuw nsw i64 100, %n.mod.vf
+  %2 = shl nuw nsw i64 %0, 5
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %lsr.iv31 = phi i64 [ %lsr.iv.next32, %vector.body ], [ %n.vec, %entry ]
+  %lsr.iv27 = phi i64 [ %lsr.iv.next28, %vector.body ], [ 0, %entry ]
+  %vec.phi = phi <vscale x 2 x double> [ insertelement (<vscale x 2 x double> zeroinitializer, double 1.000000e+00, i32 0), %entry ], [ %16, %vector.body ]
+  %vec.phi12 = phi <vscale x 2 x double> [ insertelement (<vscale x 2 x double> zeroinitializer, double 2.000000e+00, i32 0), %entry ], [ %14, %vector.body ]
+  %scevgep46 = getelementptr i8, ptr %a, i64 %lsr.iv27
+  %scevgep47 = getelementptr i8, ptr %b, i64 %lsr.iv27
+  %wide.vec = load <vscale x 4 x double>, ptr %scevgep46, align 8
+  %3 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec)
+  %4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %3, 0
+  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %3, 1
+  %wide.vec30 = load <vscale x 4 x double>, ptr %scevgep47, align 8
+  %6 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec30)
+  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %6, 0
+  %8 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %6, 1
+  %9 = fmul fast <vscale x 2 x double> %8, %4
+  %10 = fmul fast <vscale x 2 x double> %7, %5
+  %11 = fmul fast <vscale x 2 x double> %7, %4
+  %12 = fadd fast <vscale x 2 x double> %11, %vec.phi12
+  %13 = fmul fast <vscale x 2 x double> %8, %5
+  %14 = fsub fast <vscale x 2 x double> %12, %13
+  %15 = fadd fast <vscale x 2 x double> %10, %vec.phi
+  %16 = fadd fast <vscale x 2 x double> %15, %9
+  %lsr.iv.next28 = add i64 %lsr.iv27, %2
+  %lsr.iv.next32 = sub i64 %lsr.iv31, %1
+  %17 = icmp eq i64 %lsr.iv.next32, 0
+  br i1 %17, label %exit.block, label %vector.body
+
+exit.block:                                       ; preds = %vector.body
+  %18 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %14)
+  %19 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %16)
+  %.fca.0.0.insert = insertvalue %"class.std::complex" poison, double %18, 0, 0
+  %.fca.0.1.insert = insertvalue %"class.std::complex" %.fca.0.0.insert, double %19, 0, 1
+  ret %"class.std::complex" %.fca.0.1.insert
+}
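+
+; The initial value x = 2.0 + 1.0i only needs to seed lane 0 of each
+; accumulator, because the final faddv sums across all lanes; a sketch of
+; the equivalence (reduce_fadd is illustrative shorthand):
+;
+;   reduce_fadd({ 2.0, 0.0, ..., 0.0 }) == 2.0 + reduce_fadd({ 0.0, ..., 0.0 })
+;
+; This is what the "ptrue p0.d, vl1" plus "sel" sequence above materialises.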
+
+; Loop unrolled with factor 2
+;
+define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
+; CHECK-LABEL: complex_mul_v2f64_unrolled:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cntw x9
+; CHECK-NEXT:    mov w11, #1000 // =0x3e8
+; CHECK-NEXT:    neg x10, x9
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    and x10, x10, x11
+; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    rdvl x11, #4
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    addvl x12, x1, #2
+; CHECK-NEXT:    addvl x13, x0, #2
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:  .LBB2_1: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    add x14, x0, x8
+; CHECK-NEXT:    add x15, x13, x8
+; CHECK-NEXT:    add x16, x1, x8
+; CHECK-NEXT:    add x17, x12, x8
+; CHECK-NEXT:    ld1b { z4.b }, p1/z, [x0, x8]
+; CHECK-NEXT:    subs x10, x10, x9
+; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x14, #1, mul vl]
+; CHECK-NEXT:    ld1b { z6.b }, p1/z, [x13, x8]
+; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x15, #1, mul vl]
+; CHECK-NEXT:    ld1b { z16.b }, p1/z, [x1, x8]
+; CHECK-NEXT:    ld1d { z17.d }, p0/z, [x16, #1, mul vl]
+; CHECK-NEXT:    ld1b { z18.b }, p1/z, [x12, x8]
+; CHECK-NEXT:    ld1d { z19.d }, p0/z, [x17, #1, mul vl]
+; CHECK-NEXT:    add x8, x8, x11
+; CHECK-NEXT:    uzp2 z20.d, z4.d, z5.d
+; CHECK-NEXT:    uzp1 z4.d, z4.d, z5.d
+; CHECK-NEXT:    uzp2 z5.d, z6.d, z7.d
+; CHECK-NEXT:    uzp1 z6.d, z6.d, z7.d
+; CHECK-NEXT:    uzp1 z7.d, z16.d, z17.d
+; CHECK-NEXT:    uzp1 z21.d, z18.d, z19.d
+; CHECK-NEXT:    fmla z2.d, p0/m, z7.d, z4.d
+; CHECK-NEXT:    fmla z3.d, p0/m, z21.d, z6.d
+; CHECK-NEXT:    fmla z0.d, p0/m, z7.d, z20.d
+; CHECK-NEXT:    fmla z1.d, p0/m, z21.d, z5.d
+; CHECK-NEXT:    uzp2 z7.d, z16.d, z17.d
+; CHECK-NEXT:    uzp2 z16.d, z18.d, z19.d
+; CHECK-NEXT:    fmls z2.d, p0/m, z7.d, z20.d
+; CHECK-NEXT:    fmls z3.d, p0/m, z16.d, z5.d
+; CHECK-NEXT:    fmla z0.d, p0/m, z7.d, z4.d
+; CHECK-NEXT:    fmla z1.d, p0/m, z16.d, z6.d
+; CHECK-NEXT:    b.ne .LBB2_1
+; CHECK-NEXT:  // %bb.2: // %exit.block
+; CHECK-NEXT:    fadd z2.d, z3.d, z2.d
+; CHECK-NEXT:    fadd z1.d, z1.d, z0.d
+; CHECK-NEXT:    faddv d0, p0, z2.d
+; CHECK-NEXT:    faddv d1, p0, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 killed $z1
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.vscale.i64()
+  %1 = shl nuw nsw i64 %0, 2
+  %n.mod.vf = urem i64 1000, %1
+  %n.vec = sub i64 1000, %n.mod.vf
+  %2 = shl nuw nsw i64 %0, 6
+  %3 = shl nuw nsw i64 %0, 5
+  %scevgep61 = getelementptr i8, ptr %b, i64 %3
+  %scevgep63 = getelementptr i8, ptr %a, i64 %3
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %lsr.iv38 = phi i64 [ %lsr.iv.next39, %vector.body ], [ %n.vec, %entry ]
+  %lsr.iv34 = phi i64 [ %lsr.iv.next35, %vector.body ], [ 0, %entry ]
+  %vec.phi = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %30, %vector.body ]
+  %vec.phi12 = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %31, %vector.body ]
+  %vec.phi13 = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %26, %vector.body ]
+  %vec.phi14 = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %27, %vector.body ]
+  %scevgep57 = getelementptr i8, ptr %a, i64 %lsr.iv34
+  %scevgep64 = getelementptr i8, ptr %scevgep63, i64 %lsr.iv34
+  %scevgep58 = getelementptr i8, ptr %b, i64 %lsr.iv34
+  %scevgep62 = getelementptr i8, ptr %scevgep61, i64 %lsr.iv34
+  %wide.vec = load <vscale x 4 x double>, ptr %scevgep57, align 8
+  %wide.vec32 = load <vscale x 4 x double>, ptr %scevgep64, align 8
+  %4 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec)
+  %5 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec32)
+  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 0
+  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %5, 0
+  %8 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 1
+  %9 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %5, 1
+  %wide.vec34 = load <vscale x 4 x double>, ptr %scevgep58, align 8
+  %wide.vec35 = load <vscale x 4 x double>, ptr %scevgep62, align 8
+  %10 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec34)
+  %11 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.vec35)
+  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %10, 0
+  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %11, 0
+  %14 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %10, 1
+  %15 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %11, 1
+  %16 = fmul fast <vscale x 2 x double> %14, %6
+  %17 = fmul fast <vscale x 2 x double> %15, %7
+  %18 = fmul fast <vscale x 2 x double> %12, %8
+  %19 = fmul fast <vscale x 2 x double> %13, %9
+  %20 = fmul fast <vscale x 2 x double> %12, %6
+  %21 = fmul fast <vscale x 2 x double> %13, %7
+  %22 = fadd fast <vscale x 2 x double> %20, %vec.phi13
+  %23 = fadd fast <vscale x 2 x double> %21, %vec.phi14
+  %24 = fmul fast <vscale x 2 x double> %14, %8
+  %25 = fmul fast <vscale x 2 x double> %15, %9
+  %26 = fsub fast <vscale x 2 x double> %22, %24
+  %27 = fsub fast <vscale x 2 x double> %23, %25
+  %28 = fadd fast <vscale x 2 x double> %18, %vec.phi
+  %29 = fadd fast <vscale x 2 x double> %19, %vec.phi12
+  %30 = fadd fast <vscale x 2 x double> %28, %16
+  %31 = fadd fast <vscale x 2 x double> %29, %17
+  %lsr.iv.next35 = add i64 %lsr.iv34, %2
+  %lsr.iv.next39 = sub i64 %lsr.iv38, %1
+  %32 = icmp eq i64 %lsr.iv.next39, 0
+  br i1 %32, label %exit.block, label %vector.body
+
+exit.block:                                       ; preds = %vector.body
+  %bin.rdx15 = fadd fast <vscale x 2 x double> %27, %26
+  %33 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %bin.rdx15)
+  %bin.rdx = fadd fast <vscale x 2 x double> %31, %30
+  %34 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %bin.rdx)
+  %.fca.0.0.insert = insertvalue %"class.std::complex" poison, double %33, 0, 0
+  %.fca.0.1.insert = insertvalue %"class.std::complex" %.fca.0.0.insert, double %34, 0, 1
+  ret %"class.std::complex" %.fca.0.1.insert
+}
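+
+; With an unroll factor of 2 each component carries two partial accumulators,
+; which are combined before the scalar reduction; a sketch of the exit block
+; (the acc names are illustrative only):
+;
+;   real = reduce_fadd(real.acc1 + real.acc0);
+;   imag = reduce_fadd(imag.acc1 + imag.acc0);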
+
+
+declare i64 @llvm.vscale.i64()
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double>)
+declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions.ll
@@ -0,0 +1,233 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+complxnum,+neon -o - | FileCheck %s
+
+target triple = "aarch64-arm-none-eabi"
+
+%"struct.std::complex" = type { { double, double } }
+
+; Zero initialized reduction
+;
+; complex<double> x = 0.0 + 0.0i; // complex DP in C++
+; for (int i = 0; i < 100; ++i)
+;   x += a[i] * b[i];
+;
+define dso_local %"struct.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
+; CHECK-LABEL: complex_mul_v2f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:  .LBB0_1: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    add x9, x0, x8
+; CHECK-NEXT:    ld2 { v2.2d, v3.2d }, [x9]
+; CHECK-NEXT:    add x9, x1, x8
+; CHECK-NEXT:    add x8, x8, #32
+; CHECK-NEXT:    cmp x8, #1600
+; CHECK-NEXT:    ld2 { v4.2d, v5.2d }, [x9]
+; CHECK-NEXT:    fmla v0.2d, v2.2d, v4.2d
+; CHECK-NEXT:    fmla v1.2d, v3.2d, v4.2d
+; CHECK-NEXT:    fmls v0.2d, v3.2d, v5.2d
+; CHECK-NEXT:    fmla v1.2d, v2.2d, v5.2d
+; CHECK-NEXT:    b.ne .LBB0_1
+; CHECK-NEXT:  // %bb.2: // %middle.block
+; CHECK-NEXT:    faddp d0, v0.2d
+; CHECK-NEXT:    faddp d1, v1.2d
+; CHECK-NEXT:    ret
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 0, %entry ]
+  %vec.phi = phi <2 x double> [ zeroinitializer, %entry ], [ %7, %vector.body ]
+  %vec.phi27 = phi <2 x double> [ zeroinitializer, %entry ], [ %5, %vector.body ]
+  %scevgep = getelementptr i8, ptr %a, i64 %lsr.iv
+  %scevgep35 = getelementptr i8, ptr %b, i64 %lsr.iv
+  %wide.vec = load <4 x double>, ptr %scevgep, align 8
+  %strided.vec = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec28 = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %wide.vec29 = load <4 x double>, ptr %scevgep35, align 8
+  %strided.vec30 = shufflevector <4 x double> %wide.vec29, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec31 = shufflevector <4 x double> %wide.vec29, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %0 = fmul fast <2 x double> %strided.vec31, %strided.vec
+  %1 = fmul fast <2 x double> %strided.vec30, %strided.vec28
+  %2 = fmul fast <2 x double> %strided.vec30, %strided.vec
+  %3 = fadd fast <2 x double> %2, %vec.phi27
+  %4 = fmul fast <2 x double> %strided.vec31, %strided.vec28
+  %5 = fsub fast <2 x double> %3, %4
+  %6 = fadd fast <2 x double> %1, %vec.phi
+  %7 = fadd fast <2 x double> %6, %0
+  %lsr.iv.next = add nuw nsw i64 %lsr.iv, 32
+  %8 = icmp eq i64 %lsr.iv.next, 1600
+  br i1 %8, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %9 = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> %5)
+  %10 = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> %7)
+  %.fca.0.0.insert = insertvalue %"struct.std::complex" poison, double %9, 0, 0
+  %.fca.0.1.insert = insertvalue %"struct.std::complex" %.fca.0.0.insert, double %10, 0, 1
+  ret %"struct.std::complex" %.fca.0.1.insert
+}
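+
+; For fixed-width vectors the deinterleave is expressed with even/odd
+; shufflevector masks on the loaded <4 x double>, e.g. (with %v, %re and %im
+; as illustrative names):
+;
+;   %re = shufflevector <4 x double> %v, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+;   %im = shufflevector <4 x double> %v, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+;
+; The backend folds each load-plus-shuffle pair into a single ld2 structure
+; load, as checked above.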
+
+; Fixed value initialized reduction
+;
+; complex<double> x = 2.0 + 1.0i; // complex DP in C++
+; for (int i = 0; i < 100; ++i)
+;   x += a[i] * b[i];
+;
+define %"struct.std::complex" @complex_mul_nonzero_init_v2f64(ptr %a, ptr %b) {
+; CHECK-LABEL: complex_mul_nonzero_init_v2f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x9, .LCPI1_0
+; CHECK-NEXT:    adrp x10, .LCPI1_1
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    ldr q0, [x9, :lo12:.LCPI1_0]
+; CHECK-NEXT:    ldr q1, [x10, :lo12:.LCPI1_1]
+; CHECK-NEXT:  .LBB1_1: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    add x9, x0, x8
+; CHECK-NEXT:    ld2 { v2.2d, v3.2d }, [x9]
+; CHECK-NEXT:    add x9, x1, x8
+; CHECK-NEXT:    add x8, x8, #32
+; CHECK-NEXT:    cmp x8, #1600
+; CHECK-NEXT:    ld2 { v4.2d, v5.2d }, [x9]
+; CHECK-NEXT:    fmla v0.2d, v2.2d, v4.2d
+; CHECK-NEXT:    fmla v1.2d, v3.2d, v4.2d
+; CHECK-NEXT:    fmls v0.2d, v3.2d, v5.2d
+; CHECK-NEXT:    fmla v1.2d, v2.2d, v5.2d
+; CHECK-NEXT:    b.ne .LBB1_1
+; CHECK-NEXT:  // %bb.2: // %middle.block
+; CHECK-NEXT:    faddp d0, v0.2d
+; CHECK-NEXT:    faddp d1, v1.2d
+; CHECK-NEXT:    ret
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 0, %entry ]
+  %vec.phi = phi <2 x double> [ <double 1.000000e+00, double 0.000000e+00>, %entry ], [ %7, %vector.body ]
+  %vec.phi27 = phi <2 x double> [ <double 2.000000e+00, double 0.000000e+00>, %entry ], [ %5, %vector.body ]
+  %scevgep = getelementptr i8, ptr %a, i64 %lsr.iv
+  %scevgep35 = getelementptr i8, ptr %b, i64 %lsr.iv
+  %wide.vec = load <4 x double>, ptr %scevgep, align 8
+  %strided.vec = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec28 = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %wide.vec29 = load <4 x double>, ptr %scevgep35, align 8
+  %strided.vec30 = shufflevector <4 x double> %wide.vec29, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec31 = shufflevector <4 x double> %wide.vec29, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %0 = fmul fast <2 x double> %strided.vec31, %strided.vec
+  %1 = fmul fast <2 x double> %strided.vec30, %strided.vec28
+  %2 = fmul fast <2 x double> %strided.vec30, %strided.vec
+  %3 = fadd fast <2 x double> %2, %vec.phi27
+  %4 = fmul fast <2 x double> %strided.vec31, %strided.vec28
+  %5 = fsub fast <2 x double> %3, %4
+  %6 = fadd fast <2 x double> %1, %vec.phi
+  %7 = fadd fast <2 x double> %6, %0
+  %lsr.iv.next = add nuw nsw i64 %lsr.iv, 32
+  %8 = icmp eq i64 %lsr.iv.next, 1600
+  br i1 %8, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %9 = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> %5)
+  %10 = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> %7)
+  %.fca.0.0.insert = insertvalue %"struct.std::complex" poison, double %9, 0, 0
+  %.fca.0.1.insert = insertvalue %"struct.std::complex" %.fca.0.0.insert, double %10, 0, 1
+  ret %"struct.std::complex" %.fca.0.1.insert
+}
+
+; Loop unrolled with factor 2
+;
+define %"struct.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
+; CHECK-LABEL: complex_mul_v2f64_unrolled:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x9, .LCPI2_0
+; CHECK-NEXT:    adrp x10, .LCPI2_1
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    ldr q3, [x9, :lo12:.LCPI2_0]
+; CHECK-NEXT:    ldr q1, [x10, :lo12:.LCPI2_1]
+; CHECK-NEXT:  .LBB2_1: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    add x9, x0, x8
+; CHECK-NEXT:    ld2 { v4.2d, v5.2d }, [x9], #32
+; CHECK-NEXT:    ld2 { v6.2d, v7.2d }, [x9]
+; CHECK-NEXT:    add x9, x1, x8
+; CHECK-NEXT:    add x8, x8, #64
+; CHECK-NEXT:    cmp x8, #1600
+; CHECK-NEXT:    ld2 { v16.2d, v17.2d }, [x9], #32
+; CHECK-NEXT:    fmla v3.2d, v4.2d, v16.2d
+; CHECK-NEXT:    fmla v1.2d, v5.2d, v16.2d
+; CHECK-NEXT:    fmls v3.2d, v5.2d, v17.2d
+; CHECK-NEXT:    fmla v1.2d, v4.2d, v17.2d
+; CHECK-NEXT:    ld2 { v18.2d, v19.2d }, [x9]
+; CHECK-NEXT:    fmla v2.2d, v6.2d, v18.2d
+; CHECK-NEXT:    fmla v0.2d, v7.2d, v18.2d
+; CHECK-NEXT:    fmls v2.2d, v7.2d, v19.2d
+; CHECK-NEXT:    fmla v0.2d, v6.2d, v19.2d
+; CHECK-NEXT:    b.ne .LBB2_1
+; CHECK-NEXT:  // %bb.2: // %middle.block
+; CHECK-NEXT:    fadd v2.2d, v2.2d, v3.2d
+; CHECK-NEXT:    fadd v1.2d, v0.2d, v1.2d
+; CHECK-NEXT:    faddp d0, v2.2d
+; CHECK-NEXT:    faddp d1, v1.2d
+; CHECK-NEXT:    ret
+entry:
+  %scevgep = getelementptr i8, ptr %a, i64 32
+  %scevgep49 = getelementptr i8, ptr %b, i64 32
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %lsr.iv54 = phi i64 [ %lsr.iv.next, %vector.body ], [ 100, %entry ]
+  %lsr.iv50 = phi ptr [ %scevgep51, %vector.body ], [ %scevgep49, %entry ]
+  %lsr.iv = phi ptr [ %scevgep48, %vector.body ], [ %scevgep, %entry ]
+  %vec.phi = phi <2 x double> [ <double 1.000000e+00, double 0.000000e+00>, %entry ], [ %14, %vector.body ]
+  %vec.phi27 = phi <2 x double> [ zeroinitializer, %entry ], [ %15, %vector.body ]
+  %vec.phi28 = phi <2 x double> [ <double 2.000000e+00, double 0.000000e+00>, %entry ], [ %10, %vector.body ]
+  %vec.phi29 = phi <2 x double> [ zeroinitializer, %entry ], [ %11, %vector.body ]
+  %scevgep52 = getelementptr i8, ptr %lsr.iv, i64 -32
+  %scevgep53 = getelementptr i8, ptr %lsr.iv50, i64 -32
+  %wide.vec = load <4 x double>, ptr %scevgep52, align 8
+  %wide.vec30 = load <4 x double>, ptr %lsr.iv, align 8
+  %strided.vec = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec31 = shufflevector <4 x double> %wide.vec30, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec32 = shufflevector <4 x double> %wide.vec, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %strided.vec33 = shufflevector <4 x double> %wide.vec30, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %wide.vec34 = load <4 x double>, ptr %scevgep53, align 8
+  %wide.vec35 = load <4 x double>, ptr %lsr.iv50, align 8
+  %strided.vec36 = shufflevector <4 x double> %wide.vec34, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec37 = shufflevector <4 x double> %wide.vec35, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec38 = shufflevector <4 x double> %wide.vec34, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %strided.vec39 = shufflevector <4 x double> %wide.vec35, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %0 = fmul fast <2 x double> %strided.vec38, %strided.vec
+  %1 = fmul fast <2 x double> %strided.vec39, %strided.vec31
+  %2 = fmul fast <2 x double> %strided.vec36, %strided.vec32
+  %3 = fmul fast <2 x double> %strided.vec37, %strided.vec33
+  %4 = fmul fast <2 x double> %strided.vec36, %strided.vec
+  %5 = fmul fast <2 x double> %strided.vec37, %strided.vec31
+  %6 = fadd fast <2 x double> %4, %vec.phi28
+  %7 = fadd fast <2 x double> %5, %vec.phi29
+  %8 = fmul fast <2 x double> %strided.vec38, %strided.vec32
+  %9 = fmul fast <2 x double> %strided.vec39, %strided.vec33
+  %10 = fsub fast <2 x double> %6, %8
+  %11 = fsub fast <2 x double> %7, %9
+  %12 = fadd fast <2 x double> %2, %vec.phi
+  %13 = fadd fast <2 x double> %3, %vec.phi27
+  %14 = fadd fast <2 x double> %12, %0
+  %15 = fadd fast <2 x double> %13, %1
+  %scevgep48 = getelementptr i8, ptr %lsr.iv, i64 64
+  %scevgep51 = getelementptr i8, ptr %lsr.iv50, i64 64
+  %lsr.iv.next = add nsw i64 %lsr.iv54, -4
+  %16 = icmp eq i64 %lsr.iv.next, 0
+  br i1 %16, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %bin.rdx40 = fadd fast <2 x double> %11, %10
+  %17 = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> %bin.rdx40)
+  %bin.rdx = fadd fast <2 x double> %15, %14
+  %18 = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> %bin.rdx)
+  %.fca.0.0.insert = insertvalue %"struct.std::complex" poison, double %17, 0, 0
+  %.fca.0.1.insert = insertvalue %"struct.std::complex" %.fca.0.0.insert, double %18, 0, 1
+  ret %"struct.std::complex" %.fca.0.1.insert
+}
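+
+; The middle block first adds the two partial accumulators of each component
+; and only then reduces to a scalar; for a 2-lane vector the
+; llvm.vector.reduce.fadd call lowers to a single pairwise add:
+;
+;   faddp d0, v2.2d // d0 = v2.d[0] + v2.d[1]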
+declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)