Index: lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
===================================================================
--- lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -946,6 +946,18 @@
   case AArch64::CMP_SWAP_128:
     return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
 
+  case AArch64::AESMCTrr:
+  case AArch64::AESIMCTrr: {
+    MachineInstrBuilder MIB =
+    BuildMI(MBB, MBBI, MI.getDebugLoc(),
+            TII->get(Opcode == AArch64::AESMCTrr ? AArch64::AESMCrr :
+                                                   AArch64::AESIMCrr))
+      .add(MI.getOperand(0))
+      .add(MI.getOperand(1));
+    transferImpOps(MI, MIB, MIB);
+    MI.eraseFromParent();
+    return true;
+  }
   }
   return false;
 }
Index: lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.td
+++ lib/Target/AArch64/AArch64InstrInfo.td
@@ -37,6 +37,9 @@
                        AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
 def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                        AssemblerPredicate<"FeatureSPE", "spe">;
+def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
+                       AssemblerPredicate<"FeatureFuseAES",
+                       "fuse-aes">;
 def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                        AssemblerPredicate<"FeatureSVE", "sve">;
 
@@ -5304,6 +5307,31 @@
 def AESMCrr   : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
 def AESIMCrr  : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
 
+// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
+// for AES fusion on some CPUs.
+let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
+def AESMCTrr: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
+              Sched<[WriteV]>;
+def AESIMCTrr: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
+               Sched<[WriteV]>;
+}
+
+// Only use constrained versions of AES(I)MC instructions if they are paired with
+// AESE/AESD.
+def : Pat<(v16i8 (int_aarch64_crypto_aesmc
+            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
+                                            (v16i8 V128:$src2))))),
+          (v16i8 (AESMCTrr (v16i8 (AESErr (v16i8 V128:$src1),
+                                          (v16i8 V128:$src2)))))>,
+          Requires<[HasFuseAES]>;
+
+def : Pat<(v16i8 (int_aarch64_crypto_aesimc
+            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
+                                            (v16i8 V128:$src2))))),
+          (v16i8 (AESIMCTrr (v16i8 (AESDrr (v16i8 V128:$src1),
+                                           (v16i8 V128:$src2)))))>,
+          Requires<[HasFuseAES]>;
+
 def SHA1Crrr  : SHATiedInstQSV<0b000, "sha1c", int_aarch64_crypto_sha1c>;
 def SHA1Prrr  : SHATiedInstQSV<0b001, "sha1p", int_aarch64_crypto_sha1p>;
 def SHA1Mrrr  : SHATiedInstQSV<0b010, "sha1m", int_aarch64_crypto_sha1m>;
Index: lib/Target/AArch64/AArch64MacroFusion.cpp
===================================================================
--- lib/Target/AArch64/AArch64MacroFusion.cpp
+++ lib/Target/AArch64/AArch64MacroFusion.cpp
@@ -119,10 +119,12 @@
   switch(SecondOpcode) {
   // AES encode.
   case AArch64::AESMCrr :
+  case AArch64::AESMCTrr :
     return FirstOpcode == AArch64::AESErr ||
            FirstOpcode == AArch64::INSTRUCTION_LIST_END;
   // AES decode.
   case AArch64::AESIMCrr:
+  case AArch64::AESIMCTrr:
     return FirstOpcode == AArch64::AESDrr ||
            FirstOpcode == AArch64::INSTRUCTION_LIST_END;
   }
Index: test/CodeGen/AArch64/misched-fusion-aes.ll
===================================================================
--- test/CodeGen/AArch64/misched-fusion-aes.ll
+++ test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -1,10 +1,10 @@
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKM1
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s
 
 declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
 declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
@@ -76,41 +76,23 @@
   ret void
 
 ; CHECK-LABEL: aesea:
-; CHECKFUSEALLPAIRS: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKFUSEALLPAIRS: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKFUSEALLPAIRS: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKFUSEALLPAIRS: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKFUSEALLPAIRS: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKFUSEALLPAIRS: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKFUSEALLPAIRS: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
-; CHECKFUSEALLPAIRS: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
-; CHECKFUSEALLPAIRS-NOT: aesmc
-
-; CHECKM1: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKM1: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKM1: aese {{v[0-7].16b}}, {{v[0-7].16b}}
-; CHECKM1: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKM1: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKM1: aesmc {{v[0-7].16b}}, [[VH]]
-; CHECKM1: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKM1: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKM1: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
+; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VA]], [[VA]]
+; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VB]], [[VB]]
+; CHECK: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VC]], [[VC]]
+; CHECK: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VD]], [[VD]]
+; CHECK: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VE]], [[VE]]
+; CHECK: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VF]], [[VF]]
+; CHECK: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VG]], [[VG]]
+; CHECK: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VH]], [[VH]]
+; CHECK-NOT: aesmc
 }
 
 define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
@@ -178,41 +160,23 @@
   ret void
 
 ; CHECK-LABEL: aesda:
-; CHECKFUSEALLPAIRS: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKFUSEALLPAIRS: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKFUSEALLPAIRS: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKFUSEALLPAIRS: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKFUSEALLPAIRS: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKFUSEALLPAIRS: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKFUSEALLPAIRS: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
-; CHECKFUSEALLPAIRS: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
-; CHECKFUSEALLPAIRS-NOT: aesimc
-
-; CHECKM1: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKM1: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKM1: aesd {{v[0-7].16b}}, {{v[0-7].16b}}
-; CHECKM1: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKM1: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKM1: aesimc {{v[0-7].16b}}, [[VH]]
-; CHECKM1: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKM1: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKM1: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
+; CHECK: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VA]], [[VA]]
+; CHECK: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VB]], [[VB]]
+; CHECK: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VC]], [[VC]]
+; CHECK: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VD]], [[VD]]
+; CHECK: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VE]], [[VE]]
+; CHECK: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VF]], [[VF]]
+; CHECK: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VG]], [[VG]]
+; CHECK: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VH]], [[VH]]
+; CHECK-NOT: aesimc
 }
 
 define void @aes_load_store(<16 x i8> *%p1, <16 x i8> *%p2 , <16 x i8> *%p3) {
@@ -225,20 +189,20 @@
   %in1 = load <16 x i8>, <16 x i8>* %p1, align 16
   store <16 x i8> %in1, <16 x i8>* %x1, align 16
   %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
-  store <16 x i8> %aese1, <16 x i8>* %x2, align 16
   %in2 = load <16 x i8>, <16 x i8>* %p2, align 16
   %aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
-  store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
   %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
-  store <16 x i8> %aese2, <16 x i8>* %x4, align 16
+  store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
+  %in3 = load <16 x i8>, <16 x i8>* %p3, align 16
   %aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2
-  store <16 x i8> %aesmc2, <16 x i8>* %x5, align 16
+  %aese3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc2, <16 x i8> %in3) #2
+  store <16 x i8> %aese3, <16 x i8>* %x5, align 16
   ret void
 
 ; CHECK-LABEL: aes_load_store:
 ; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECK-NEXT: aesmc [[VA]], [[VA]]
 ; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
+; CHECK-NEXT: aesmc [[VB]], [[VB]]
 ; CHECK-NOT: aesmc
 }
Index: test/MC/AArch64/arm64-crypto.s
===================================================================
--- test/MC/AArch64/arm64-crypto.s
+++ test/MC/AArch64/arm64-crypto.s
@@ -1,4 +1,5 @@
 ; RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -show-encoding -output-asm-variant=1 < %s | FileCheck %s
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr='+crypto,+fuse-aes' -show-encoding -output-asm-variant=1 < %s | FileCheck %s
 
 foo:
   aese.16b v0, v1