Index: llvm/trunk/lib/Analysis/MemorySSA.cpp
===================================================================
--- llvm/trunk/lib/Analysis/MemorySSA.cpp
+++ llvm/trunk/lib/Analysis/MemorySSA.cpp
@@ -192,8 +192,6 @@
   }
 };
 
-enum class Reorderability { Always, IfNoAlias, Never };
-
 } // end namespace llvm
 
 /// This does one-way checks to see if Use could theoretically be hoisted above
@@ -202,22 +200,16 @@
 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
 /// MayClobber, with no potentially clobbering operations in between them.
 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
-static Reorderability getLoadReorderability(const LoadInst *Use,
-                                            const LoadInst *MayClobber) {
+static bool areLoadsReorderable(const LoadInst *Use,
+                                const LoadInst *MayClobber) {
   bool VolatileUse = Use->isVolatile();
   bool VolatileClobber = MayClobber->isVolatile();
   // Volatile operations may never be reordered with other volatile operations.
   if (VolatileUse && VolatileClobber)
-    return Reorderability::Never;
-
-  // The lang ref allows reordering of volatile and non-volatile operations.
-  // Whether an aliasing nonvolatile load and volatile load can be reordered,
-  // though, is ambiguous. Because it may not be best to exploit this ambiguity,
-  // we only allow volatile/non-volatile reordering if the volatile and
-  // non-volatile operations don't alias.
-  Reorderability Result = VolatileUse || VolatileClobber
-                              ? Reorderability::IfNoAlias
-                              : Reorderability::Always;
+    return false;
+  // Otherwise, volatile doesn't matter here. From the language reference:
+  // 'optimizers may change the order of volatile operations relative to
+  // non-volatile operations.'
 
   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
   // is weaker, it can be moved above other loads. We just need to be sure that
@@ -229,9 +221,7 @@
   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                      AtomicOrdering::Acquire);
-  if (SeqCstUse || MayClobberIsAcquire)
-    return Reorderability::Never;
-  return Result;
+  return !(SeqCstUse || MayClobberIsAcquire);
 }
 
 static bool instructionClobbersQuery(MemoryDef *MD,
@@ -265,18 +255,9 @@
     return isModOrRefSet(I);
   }
 
-  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
-    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
-      switch (getLoadReorderability(UseLoad, DefLoad)) {
-      case Reorderability::Always:
-        return false;
-      case Reorderability::Never:
-        return true;
-      case Reorderability::IfNoAlias:
-        return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
-      }
-    }
-  }
+  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
+    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
+      return !areLoadsReorderable(UseLoad, DefLoad);
 
   return isModSet(AA.getModRefInfo(DefInst, UseLoc));
 }
Index: llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll
===================================================================
--- llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll
+++ llvm/trunk/test/Analysis/MemorySSA/volatile-clobber.ll
@@ -22,8 +22,7 @@
   ret i32 %4
 }
 
-; Ensuring that we don't automatically hoist nonvolatile loads around volatile
-; loads
+; Ensuring we allow hoisting nonvolatile loads around volatile loads.
 ; CHECK-LABEL define void @volatile_only
 define void @volatile_only(i32* %arg1, i32* %arg2) {
   ; Trivially NoAlias/MustAlias
   %a = alloca i32
   %b = alloca i32
 
 ; CHECK: 1 = MemoryDef(liveOnEntry)
 ; CHECK-NEXT: load volatile i32, i32* %a
   load volatile i32, i32* %a
@@ -36,7 +35,7 @@
 ; CHECK: MemoryUse(liveOnEntry)
 ; CHECK-NEXT: load i32, i32* %b
   load i32, i32* %b
-; CHECK: MemoryUse(1)
+; CHECK: MemoryUse(liveOnEntry)
 ; CHECK-NEXT: load i32, i32* %a
   load i32, i32* %a
@@ -44,7 +43,7 @@
 ; CHECK: 2 = MemoryDef(1)
 ; CHECK-NEXT: load volatile i32, i32* %arg1
   load volatile i32, i32* %arg1
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(liveOnEntry)
 ; CHECK-NEXT: load i32, i32* %arg2
   load i32, i32* %arg2
@@ -75,10 +74,10 @@
 ; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load atomic i32, i32* %b unordered, align 4
   load atomic i32, i32* %b unordered, align 4
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
   load atomic i32, i32* %a unordered, align 4
-; CHECK: MemoryUse(2)
+; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load i32, i32* %a
   load i32, i32* %a
@@ -86,7 +85,7 @@
 ; CHECK: 3 = MemoryDef(2)
 ; CHECK-NEXT: load atomic volatile i32, i32* %arg1 monotonic, align 4
   load atomic volatile i32, i32* %arg1 monotonic, align 4
-; CHECK: MemoryUse(3)
+; CHECK: MemoryUse(1)
 ; CHECK-NEXT: load i32, i32* %arg2
   load i32, i32* %arg2
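
A minimal sketch (editor's illustration, not part of the patch), mirroring the
volatile_only test above: with this change, a non-volatile load that follows a
may-aliasing volatile load is no longer treated as clobbered by it, so its
MemoryUse can point at liveOnEntry rather than at the volatile load's
MemoryDef. Sequentially consistent uses and acquire-or-stronger clobbers still
make areLoadsReorderable return false, so they keep their old treatment. The
function name @sketch is hypothetical; the comments mirror the MemorySSA
annotations checked by the CHECK lines above.

define void @sketch(i32* %p, i32* %q) {
; 1 = MemoryDef(liveOnEntry)
  ; The volatile load still gets a MemoryDef of its own.
  load volatile i32, i32* %p
; MemoryUse(liveOnEntry)
  ; Before this patch, the may-aliasing nonvolatile load was MemoryUse(1).
  load i32, i32* %q
  ret void
}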