Index: lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
===================================================================
--- lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -815,7 +815,7 @@
     // If this is a volatile load/store that otherwise matched, stop looking
     // as something is going on that we don't have enough information to
     // safely transform. Similarly, stop if we see a hint to avoid pairs.
-    if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
+    if ((Limit != 1 && MI->hasOrderedMemoryRef()) || TII->isLdStPairSuppressed(MI))
       return E;
     // If the resultant immediate offset of merging these instructions
     // is out of range for a pairwise instruction, bail and keep looking.
@@ -1130,9 +1130,10 @@
     MachineBasicBlock::iterator &MBBI) {
   MachineInstr *MI = MBBI;
   MachineBasicBlock::iterator E = MI->getParent()->end();
-  // If this is a volatile load/store, don't mess with it.
+
+  bool IsVolatile = MI->hasOrderedMemoryRef();
   if (MI->hasOrderedMemoryRef())
-    return false;
+    (void)IsVolatile; // volatile: handled below with a restricted scan limit.
   // Make sure this is a reg+imm (as opposed to an address reloc).
   if (!getLdStOffsetOp(MI).isImm())
     return false;
@@ -1145,7 +1146,17 @@

   // Look ahead up to ScanLimit instructions for a pairable instruction.
   LdStPairFlags Flags;
-  MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
+  MachineBasicBlock::iterator Paired;
+  if (IsVolatile) {
+    // Only check the next instruction if this is a volatile load.
+    if (!MI->mayLoad())
+      return false;
+
+    Paired = findMatchingInsn(MBBI, Flags, 1);
+  } else {
+    Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
+  }
+
   if (Paired != E) {
     if (isSmallTypeLdMerge(MI)) {
       ++NumSmallTypeMerged;