Index: lib/Target/ARM/ARMLoadStoreOptimizer.cpp
===================================================================
--- lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1580,7 +1580,8 @@
   const MachineMemOperand &MMO = **MI.memoperands_begin();
 
   // Don't touch volatile memory accesses - we may be changing their order.
-  if (MMO.isVolatile())
+  // TODO: Unclear whether we need to be as defensive about atomic operations.
+  if (MMO.isVolatile() || MMO.isAtomic())
     return false;
 
   // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
@@ -2144,7 +2145,8 @@
   // At the moment, we ignore the memoryoperand's value.
   // If we want to use AliasAnalysis, we should check it accordingly.
   if (!Op0->hasOneMemOperand() ||
-      (*Op0->memoperands_begin())->isVolatile())
+      (*Op0->memoperands_begin())->isVolatile() ||
+      (*Op0->memoperands_begin())->isAtomic())
     return false;
 
   unsigned Align = (*Op0->memoperands_begin())->getAlignment();
Index: lib/Target/Hexagon/HexagonExpandCondsets.cpp
===================================================================
--- lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -733,7 +733,7 @@
     HasDef = true;
   }
   for (auto &Mo : MI->memoperands())
-    if (Mo->isVolatile())
+    if (Mo->isVolatile() || Mo->isAtomic())
       return false;
   return true;
 }
Index: lib/Target/Hexagon/HexagonFrameLowering.cpp
===================================================================
--- lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -2101,7 +2101,7 @@
     }
     if (!Bad) {
       for (auto *Mo : In.memoperands()) {
-        if (!Mo->isVolatile())
+        if (!Mo->isVolatile() && !Mo->isAtomic())
           continue;
         Bad = true;
         break;
Index: lib/Target/Hexagon/HexagonSplitDouble.cpp
===================================================================
--- lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -152,8 +152,8 @@
 }
 
 bool HexagonSplitDoubleRegs::isVolatileInstr(const MachineInstr *MI) const {
-  for (auto &I : MI->memoperands())
-    if (I->isVolatile())
+  for (auto &MO : MI->memoperands())
+    if (MO->isVolatile() || MO->isAtomic())
       return true;
   return false;
 }
Index: lib/Target/Lanai/LanaiMemAluCombiner.cpp
===================================================================
--- lib/Target/Lanai/LanaiMemAluCombiner.cpp
+++ lib/Target/Lanai/LanaiMemAluCombiner.cpp
@@ -158,7 +158,8 @@
   const MachineMemOperand *MemOperand = *MI.memoperands_begin();
 
   // Don't move volatile memory accesses
-  if (MemOperand->isVolatile())
+  // TODO: unclear if we need to be as conservative about atomics
+  if (MemOperand->isVolatile() || MemOperand->isAtomic())
     return false;
 
   return true;
Index: lib/Target/SystemZ/SystemZInstrInfo.cpp
===================================================================
--- lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1188,7 +1188,7 @@
   // MVCs that turn out to be redundant.
   if (OpNum == 0 && MI.hasOneMemOperand()) {
     MachineMemOperand *MMO = *MI.memoperands_begin();
-    if (MMO->getSize() == Size && !MMO->isVolatile()) {
+    if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
      // Handle conversion of loads.
       if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
         return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
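
For reference, every hunk above applies the same guard: treat an instruction as unsafe to reorder or combine when any of its MachineMemOperands is volatile or atomic. A minimal standalone sketch of that check, using a hypothetical helper name (hasVolatileOrAtomicMemOperand is not part of this patch), could look like:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

// Sketch only: reports whether any memory operand on MI is volatile or
// atomic, i.e. the accesses the passes above decline to touch.
static bool hasVolatileOrAtomicMemOperand(const llvm::MachineInstr &MI) {
  for (const llvm::MachineMemOperand *MMO : MI.memoperands())
    if (MMO->isVolatile() || MMO->isAtomic())
      return true;
  return false;
}

The TODOs added in the ARM and Lanai hunks suggest the atomic half of this guard is deliberately conservative and might later be relaxed.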