Index: lib/CodeGen/MachineScheduler.cpp =================================================================== --- lib/CodeGen/MachineScheduler.cpp +++ lib/CodeGen/MachineScheduler.cpp @@ -3233,6 +3233,12 @@ Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall)) return; + // Keep clustered nodes together. + if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(), + Cand.SU == DAG->getNextClusterSucc(), + TryCand, Cand, Cluster)) + return; + // Avoid critical resource consumption and balance the schedule. if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, TryCand, Cand, ResourceReduce)) Index: lib/Target/AArch64/AArch64MacroFusion.cpp =================================================================== --- lib/Target/AArch64/AArch64MacroFusion.cpp +++ lib/Target/AArch64/AArch64MacroFusion.cpp @@ -232,6 +232,18 @@ dbgs() << DAG->TII->getName(FirstMI->getOpcode()) << " - " << DAG->TII->getName(SecondMI->getOpcode()) << '\n'; ); + if (&SecondSU != &DAG->ExitSU) + // Make instructions dependent on FirstSU also dependent on SecondSU to + // prevent them from being scheduled between FirstSU and SecondSU. 
+ for (SUnit::const_succ_iterator + SI = FirstSU.Succs.begin(), SE = FirstSU.Succs.end(); + SI != SE; ++SI) { + if (SI->getSUnit() == &SecondSU) + continue; + DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n"); + DAG->addEdge(SI->getSUnit(), SDep(&SecondSU, SDep::Artificial)); + } + ++NumFused; return true; } Index: lib/Target/AArch64/AArch64TargetMachine.cpp =================================================================== --- lib/Target/AArch64/AArch64TargetMachine.cpp +++ lib/Target/AArch64/AArch64TargetMachine.cpp @@ -280,7 +280,7 @@ ScheduleDAGInstrs * createPostMachineScheduler(MachineSchedContext *C) const override { const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>(); - if (ST.hasFuseLiterals()) { + if (ST.hasFuseLiterals() || ST.hasFuseAES()) { // Run the Macro Fusion after RA again since literals are expanded from // pseudos then (v. addPreSched2()). ScheduleDAGMI *DAG = createGenericSchedPostRA(C); Index: test/CodeGen/AArch64/misched-fusion-aes.ll =================================================================== --- test/CodeGen/AArch64/misched-fusion-aes.ll +++ test/CodeGen/AArch64/misched-fusion-aes.ll @@ -1,5 +1,5 @@ -; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57 -; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA72 +; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57A72 +; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57A72 ; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKM1 declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k) @@ -72,47 +72,32 @@ ret void ; CHECK-LABEL: aesea: -; CHECKA57: aese [[VA:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57: aese [[VB:v[0-7].16b]], 
{{v[0-7].16b}} -; CHECKA57: aese [[VC:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VC]] -; CHECKA57: aesmc {{v[0-7].16b}}, [[VA]] -; CHECKA57: aese [[VD:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VD]] -; CHECKA57: aesmc {{v[0-7].16b}}, [[VB]] -; CHECKA57: aese [[VE:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VE]] -; CHECKA57: aese [[VF:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VF]] -; CHECKA57: aese [[VG:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VG]] -; CHECKA57: aese [[VH:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VH]] -; CHECKA72: aese [[VA:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VA]] -; CHECKA72: aese [[VB:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VB]] -; CHECKA72: aese [[VC:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VC]] -; CHECKA72: aese [[VD:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VD]] -; CHECKA72: aese [[VE:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VE]] -; CHECKA72: aese [[VF:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VF]] -; CHECKA72: aese [[VG:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VG]] -; CHECKA72: aese [[VH:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VH]] +; CHECKA57A72: aese [[VA:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VA]] +; CHECKA57A72: aese [[VB:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VB]] +; CHECKA57A72: aese [[VC:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VC]] +; CHECKA57A72: aese [[VD:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VD]] +; CHECKA57A72: aese [[VE:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc 
{{v[0-7].16b}}, [[VE]] +; CHECKA57A72: aese [[VF:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VF]] +; CHECKA57A72: aese [[VG:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VG]] +; CHECKA57A72: aese [[VH:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VH]] ; CHECKM1: aese [[VA:v[0-7].16b]], {{v[0-7].16b}} -; CHECKM1: aesmc {{v[0-7].16b}}, [[VA]] +; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VA]] +; CHECKM1: aese {{v[0-7].16b}}, {{v[0-7].16b}} ; CHECKM1: aese [[VB:v[0-7].16b]], {{v[0-7].16b}} ; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VB]] ; CHECKM1: aese {{v[0-7].16b}}, {{v[0-7].16b}} ; CHECKM1: aese [[VC:v[0-7].16b]], {{v[0-7].16b}} ; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VC]] ; CHECKM1: aese [[VD:v[0-7].16b]], {{v[0-7].16b}} -; CHECKM1: aesmc {{v[0-7].16b}}, [[VD]] +; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VD]] ; CHECKM1: aese [[VE:v[0-7].16b]], {{v[0-7].16b}} ; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VE]] ; CHECKM1: aese [[VF:v[0-7].16b]], {{v[0-7].16b}} @@ -120,7 +105,6 @@ ; CHECKM1: aese [[VG:v[0-7].16b]], {{v[0-7].16b}} ; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VG]] ; CHECKM1: aese [[VH:v[0-7].16b]], {{v[0-7].16b}} -; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VH]] } define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) { @@ -188,40 +172,25 @@ ret void ; CHECK-LABEL: aesda: -; CHECKA57: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VC]] -; CHECKA57: aesimc {{v[0-7].16b}}, [[VA]] -; CHECKA57: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VD]] -; CHECKA57: aesimc {{v[0-7].16b}}, [[VB]] -; CHECKA57: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VE]] -; CHECKA57: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VF]] -; 
CHECKA57: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VG]] -; CHECKA57: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VH]] -; CHECKA72: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VA]] -; CHECKA72: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VB]] -; CHECKA72: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VC]] -; CHECKA72: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VD]] -; CHECKA72: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VE]] -; CHECKA72: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VF]] -; CHECKA72: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VG]] -; CHECKA72: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}} -; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VH]] +; CHECKA57A72: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VA]] +; CHECKA57A72: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VB]] +; CHECKA57A72: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VC]] +; CHECKA57A72: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VD]] +; CHECKA57A72: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VE]] +; CHECKA57A72: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VF]] +; CHECKA57A72: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VG]] +; CHECKA57A72: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}} +; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VH]] ; CHECKM1: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}} -; CHECKM1: aesimc {{v[0-7].16b}}, [[VA]] +; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VA]] +; CHECKM1: 
aesd {{v[0-7].16b}}, {{v[0-7].16b}} ; CHECKM1: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}} ; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VB]] ; CHECKM1: aesd {{v[0-7].16b}}, {{v[0-7].16b}} @@ -236,5 +205,31 @@ ; CHECKM1: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}} ; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VG]] ; CHECKM1: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}} -; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VH]] +} + +define void @aes_load_store(<16 x i8> *%p1, <16 x i8> *%p2 , <16 x i8> *%p3) { +entry: + %x1 = alloca <16 x i8>, align 16 + %x2 = alloca <16 x i8>, align 16 + %x3 = alloca <16 x i8>, align 16 + %x4 = alloca <16 x i8>, align 16 + %x5 = alloca <16 x i8>, align 16 + %in1 = load <16 x i8>, <16 x i8>* %p1, align 16 + store <16 x i8> %in1, <16 x i8>* %x1, align 16 + %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2 + store <16 x i8> %aese1, <16 x i8>* %x2, align 16 + %in2 = load <16 x i8>, <16 x i8>* %p2, align 16 + %aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2 + store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16 + %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2 + store <16 x i8> %aese2, <16 x i8>* %x4, align 16 + %aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2 + store <16 x i8> %aesmc2, <16 x i8>* %x5, align 16 + ret void + +; CHECK-LABEL: aes_load_store: +; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}} +; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VA]] +; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}} +; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VB]] }