diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -840,6 +840,8 @@
   EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                              bool AllowUnknown = false) const override;
 
+  Align getPrefLoopAlignment(MachineLoop *ML) const override;
+
 private:
   /// Keep a pointer to the AArch64Subtarget around so that we can
   /// make the right decision when generating code for different targets.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -38,6 +38,7 @@
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -8525,6 +8526,15 @@
   return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
 }
 
+Align AArch64TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+  // TODO: Investigate how to choose these values; they are placeholders and
+  // not ready to commit.
+  // Prefer a 32-byte boundary for the loop header, but emit at most 8 bytes
+  // of padding to reach it (".p2align 5, 0x0, 8" in the output).
+  ML->getTopBlock()->setMaxBytesForAlignment(8);
+  return Align(32);
+}
+
 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
 /// vector. If it is invalid, don't add anything to Ops.
 void AArch64TargetLowering::LowerAsmOperandForConstraint(
diff --git a/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll
@@ -0,0 +1,128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -o - | FileCheck %s
+
+define i32 @a(i32 %x, i32* nocapture readonly %y, i32* nocapture readonly %z) {
+; CHECK-LABEL: a:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, #1
+; CHECK-NEXT:    b.lt .LBB0_3
+; CHECK-NEXT:  // %bb.1: // %for.body.preheader
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    cmp w0, #7
+; CHECK-NEXT:    b.hi .LBB0_4
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov x9, xzr
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    b .LBB0_7
+; CHECK-NEXT:  .LBB0_3:
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_4: // %vector.ph
+; CHECK-NEXT:    and x9, x8, #0xfffffff8
+; CHECK-NEXT:    add x10, x2, #16
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    add x11, x1, #16
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    mov x12, x9
+; CHECK-NEXT:    .p2align 5, 0x0, 8
+; CHECK-NEXT:  .LBB0_5: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldp q2, q3, [x11, #-16]
+; CHECK-NEXT:    subs x12, x12, #8
+; CHECK-NEXT:    add x11, x11, #32
+; CHECK-NEXT:    add v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    ldp q4, q2, [x10, #-16]
+; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    add x10, x10, #32
+; CHECK-NEXT:    add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v2.4s
+; CHECK-NEXT:    b.ne .LBB0_5
+; CHECK-NEXT:  // %bb.6: // %middle.block
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    b.eq .LBB0_9
+; CHECK-NEXT:  .LBB0_7: // %for.body.preheader1
+; CHECK-NEXT:    lsl x10, x9, #2
+; CHECK-NEXT:    sub x8, x8, x9
+; CHECK-NEXT:    add x9, x2, x10
+; CHECK-NEXT:    add x10, x1, x10
+; CHECK-NEXT:    .p2align 5, 0x0, 8
+; CHECK-NEXT:  .LBB0_8: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr w11, [x10], #4
+; CHECK-NEXT:    ldr w12, [x9], #4
+; CHECK-NEXT:    subs x8, x8, #1
+; CHECK-NEXT:    add w11, w11, w0
+; CHECK-NEXT:    add w0, w11, w12
+; CHECK-NEXT:    b.ne .LBB0_8
+; CHECK-NEXT:  .LBB0_9: // %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %cmp10 = icmp sgt i32 %x, 0
+  br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %x to i64
+  %min.iters.check = icmp ult i32 %x, 8
+  br i1 %min.iters.check, label %for.body.preheader17, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i64 %wide.trip.count, 4294967288
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %10, %vector.body ]
+  %vec.phi13 = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %11, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %y, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = getelementptr inbounds i32, i32* %0, i64 4
+  %3 = bitcast i32* %2 to <4 x i32>*
+  %wide.load14 = load <4 x i32>, <4 x i32>* %3, align 4
+  %4 = getelementptr inbounds i32, i32* %z, i64 %index
+  %5 = bitcast i32* %4 to <4 x i32>*
+  %wide.load15 = load <4 x i32>, <4 x i32>* %5, align 4
+  %6 = getelementptr inbounds i32, i32* %4, i64 4
+  %7 = bitcast i32* %6 to <4 x i32>*
+  %wide.load16 = load <4 x i32>, <4 x i32>* %7, align 4
+  %8 = add <4 x i32> %wide.load, %vec.phi
+  %9 = add <4 x i32> %wide.load14, %vec.phi13
+  %10 = add <4 x i32> %8, %wide.load15
+  %11 = add <4 x i32> %9, %wide.load16
+  %index.next = add nuw i64 %index, 8
+  %12 = icmp eq i64 %index.next, %n.vec
+  br i1 %12, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %bin.rdx = add <4 x i32> %11, %10
+  %13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
+  %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader17
+
+for.body.preheader17:                             ; preds = %for.body.preheader, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %b.011.ph = phi i32 [ 0, %for.body.preheader ], [ %13, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %b.0.lcssa = phi i32 [ 0, %entry ], [ %13, %middle.block ], [ %add3, %for.body ]
+  ret i32 %b.0.lcssa
+
+for.body:                                         ; preds = %for.body.preheader17, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader17 ]
+  %b.011 = phi i32 [ %add3, %for.body ], [ %b.011.ph, %for.body.preheader17 ]
+  %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+  %14 = load i32, i32* %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32* %z, i64 %indvars.iv
+  %15 = load i32, i32* %arrayidx2, align 4
+  %add = add i32 %14, %b.011
+  %add3 = add i32 %add, %15
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
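
Note on the TODO in getPrefLoopAlignment: the hook currently hardcodes both the preferred alignment (32 bytes) and the padding cap (8 bytes). One possible direction, sketched below, is to hoist the two constants into cl::opt flags so they can be tuned without rebuilding. This is an illustrative sketch only, not part of the patch: the flag names, defaults, and descriptions are invented here, and a per-subtarget heuristic may well be the better end state.

// Hypothetical sketch, not part of this patch. Assumes it lives in
// AArch64ISelLowering.cpp, where "llvm/Support/CommandLine.h" is already
// included and "using namespace llvm;" is in effect.
static cl::opt<unsigned> SketchPrefLoopAlignment(
    "aarch64-sketch-pref-loop-align", cl::init(32), cl::Hidden,
    cl::desc("Preferred loop-header alignment in bytes (power of two)"));

static cl::opt<unsigned> SketchMaxLoopPadding(
    "aarch64-sketch-max-loop-padding", cl::init(8), cl::Hidden,
    cl::desc("Maximum bytes of padding to spend reaching that alignment"));

Align AArch64TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  // Cap the padding the assembler may insert in front of the loop header;
  // if more than the cap would be needed, the block is left unaligned.
  ML->getTopBlock()->setMaxBytesForAlignment(SketchMaxLoopPadding);
  return Align(SketchPrefLoopAlignment);
}

For reference, the directive the test checks for, ".p2align 5, 0x0, 8", asks the assembler to align to a 2^5 = 32-byte boundary using fill value 0x0, but to emit nothing if more than 8 bytes of padding would be required. That pairing of getPrefLoopAlignment's return value with the setMaxBytesForAlignment cap is exactly what both loop headers (.LBB0_5 and .LBB0_8) exercise.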