Index: llvm/lib/Transforms/Scalar/SROA.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/SROA.cpp
+++ llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2519,6 +2519,8 @@
     NewLI->setAAMetadata(AATags);
     if (LI.isVolatile())
       NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
+    if (NewLI->isAtomic())
+      NewLI->setAlignment(MaybeAlign(LI.getAlignment()));

     // Any !nonnull metadata or !range metadata on the old load is also valid
     // on the new load. This is even true in some cases even when the loads
@@ -2709,6 +2711,8 @@
     NewSI->setAAMetadata(AATags);
     if (SI.isVolatile())
       NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
+    if (NewSI->isAtomic())
+      NewSI->setAlignment(MaybeAlign(SI.getAlignment()));

     Pass.DeadInsts.insert(&SI);
     deleteIfTriviallyDead(OldOp);
Index: llvm/test/Transforms/SROA/alignment.ll
===================================================================
--- llvm/test/Transforms/SROA/alignment.ll
+++ llvm/test/Transforms/SROA/alignment.ll
@@ -228,4 +228,19 @@
   ret void
 }

+%struct = type { i32, i32 }
+define dso_local i32 @pr45010(%struct* %A) {
+; CHECK-LABEL: @pr45010
+; CHECK: load atomic volatile i32, {{.*}}, align 4
+
+  %B = alloca %struct, align 4
+  %A.i = getelementptr inbounds %struct, %struct* %A, i32 0, i32 0
+  %B.i = getelementptr inbounds %struct, %struct* %B, i32 0, i32 0
+  %1 = load i32, i32* %A.i, align 4
+  store i32 %1, i32* %B.i, align 4
+  %2 = bitcast %struct* %B to i32*
+  %x = load atomic volatile i32, i32* %2 acquire, align 4
+  ret i32 %x
+}
+
 declare void @populate(i8*)