Index: llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -556,13 +556,11 @@
           return MemDepResult::getClobber(SI);
       }
 
-      // FIXME: this is overly conservative.
       // While volatile access cannot be eliminated, they do not have to clobber
       // non-aliasing locations, as normal accesses can for example be reordered
       // with volatile accesses.
       if (SI->isVolatile())
-        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
-            isOtherMemAccess(QueryInst))
+        if (!QueryInst || QueryInst->isVolatile())
           return MemDepResult::getClobber(SI);
 
       // If alias analysis can tell that this store is guaranteed to not modify
Index: llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
===================================================================
--- /dev/null
+++ llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -gvn -basic-aa < %s | FileCheck %s
+
+; Check that a volatile access does not prevent reordering of non-volatile loads and stores around it.
+
+%union.anon = type { i32 }
+
+@u = global i32 5, align 4
+@w = global i32 10, align 4
+
+define i32 @test_load() {
+; CHECK-LABEL: @test_load(
+; CHECK: ret i32 %lv
+  %l1 = load atomic i32, i32* @w unordered, align 4
+  %lv = load volatile i32, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w unordered, align 4
+  %res.1 = sub i32 %l1, %l2
+  %res = add i32 %res.1, %lv
+  ret i32 %res
+}
+
+define i32 @test_store(i32 %x) {
+; CHECK-LABEL: @test_store(
+; CHECK: ret i32 0
+  %l1 = load atomic i32, i32* @w unordered, align 4
+  store volatile i32 %x, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w unordered, align 4
+  %res = sub i32 %l1, %l2
+  ret i32 %res
+}