diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -556,13 +556,11 @@
       return MemDepResult::getClobber(SI);
     }
 
-    // FIXME: this is overly conservative.
     // While volatile accesses cannot be eliminated, they do not have to clobber
     // non-aliasing locations, as normal accesses can, for example, be reordered
     // with volatile accesses.
     if (SI->isVolatile())
-      if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
-          isOtherMemAccess(QueryInst))
+      if (!QueryInst || QueryInst->isVolatile())
        return MemDepResult::getClobber(SI);
 
     // If alias analysis can tell that this store is guaranteed to not modify
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
@@ -53,11 +53,8 @@
 
 define i32 @test_store(i32 %x) {
 ; CHECK-LABEL: @test_store(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
 ; CHECK-NEXT: store volatile i32 [[X:%.*]], i32* @u, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
-; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
-; CHECK-NEXT: [[RES:%.*]] = ret i32 [[RES]]
+; CHECK-NEXT: ret i32 0
 ;
   %l1 = load atomic i32, i32* @w unordered, align 4
   store volatile i32 %x, i32* @u, align 4
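
For context, the test hunk cuts off after the volatile store, but the deleted CHECK-NEXT lines imply how @test_store continues. A sketch of the presumed full test function, reconstructed from those CHECK lines (the globals @u and @w appear in the hunk; the trailing load/sub/ret and the RUN line are inferred, not shown in the diff):

@u = global i32 0
@w = global i32 0

; Before this patch, the volatile store to @u was treated as a clobber of the
; unordered atomic loads from @w, even though the locations do not alias.
; With the relaxed rule, a non-volatile query is no longer clobbered, so GVN
; can forward %l1 to %l2 and fold %res to 0 -- matching the new CHECK lines.
define i32 @test_store(i32 %x) {
  %l1 = load atomic i32, i32* @w unordered, align 4
  store volatile i32 %x, i32* @u, align 4
  %l2 = load atomic i32, i32* @w unordered, align 4
  %res = sub i32 %l1, %l2
  ret i32 %res
}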