Index: lib/Analysis/Loads.cpp
===================================================================
--- lib/Analysis/Loads.cpp
+++ lib/Analysis/Loads.cpp
@@ -280,9 +280,17 @@
     Value *AccessedPtr;
     unsigned AccessedAlign;
     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
+      // Ignore volatile loads. The execution of a volatile load cannot
+      // be used to prove an address is backed by regular memory; it can,
+      // for example, point to an MMIO register.
+      if (LI->isVolatile())
+        continue;
       AccessedPtr = LI->getPointerOperand();
       AccessedAlign = LI->getAlignment();
     } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+      // Ignore volatile stores (see comment for loads).
+      if (SI->isVolatile())
+        continue;
       AccessedPtr = SI->getPointerOperand();
       AccessedAlign = SI->getAlignment();
     } else
Index: test/Transforms/SROA/phi-and-select.ll
===================================================================
--- test/Transforms/SROA/phi-and-select.ll
+++ test/Transforms/SROA/phi-and-select.ll
@@ -632,3 +632,15 @@
   %result = load i32, i32* %phi, align 4
   ret i32 %result
 }
+
+; Don't speculate a load based on an earlier volatile operation.
+define i8 @volatile_select(i8* %p, i1 %b) {
+; CHECK-LABEL: @volatile_select(
+; CHECK: select i1 %b, i8* %p, i8* %p2
+  %p2 = alloca i8
+  store i8 0, i8* %p2
+  store volatile i8 0, i8* %p
+  %px = select i1 %b, i8* %p, i8* %px
+  %v2 = load i8, i8* %px
+  ret i8 %v2
+}
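
Note: as a rough source-level sketch of what the new test guards against (the
function and variable names below are hypothetical, not part of the patch): a
volatile store may target a device register, so its execution proves nothing
about whether the pointer is backed by ordinary dereferenceable memory, and
SROA must not use it as license to hoist the conditional load out of the
select.

    #include <cstdint>

    // Hypothetical C++ analogue of @volatile_select above.
    uint8_t volatile_select(uint8_t *p, bool b) {
      uint8_t local = 0;            // corresponds to the alloca %p2
      *(volatile uint8_t *)p = 0;   // volatile store; p may point at MMIO
      uint8_t *px = b ? p : &local; // the select of pointers in the IR
      return *px;                   // this load must stay conditional on b
    }

Without the isSafeToLoadUnconditionally change, the volatile store counts as
a prior access to %p, so SROA would speculate the load and rewrite the select
of pointers into a select of loaded values; the CHECK line verifies the
pointer select survives instead.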