diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -36,6 +36,8 @@
     cl::desc("Maximum users to visit in copy from constant transform"),
     cl::Hidden);
 
+extern cl::opt<bool> EnableInferAlignmentPass;
+
 /// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
 /// pointer to an alloca. Ignore any reads of the pointer, return false if we
 /// see any stores or other unknown uses. If we see pointer arithmetic, keep
@@ -1048,11 +1050,13 @@
   if (Instruction *Res = combineLoadToOperationType(*this, LI))
     return Res;
 
-  // Attempt to improve the alignment.
-  Align KnownAlign = getOrEnforceKnownAlignment(
-      Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
-  if (KnownAlign > LI.getAlign())
-    LI.setAlignment(KnownAlign);
+  if (!EnableInferAlignmentPass) {
+    // Attempt to improve the alignment.
+    Align KnownAlign = getOrEnforceKnownAlignment(
+        Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
+    if (KnownAlign > LI.getAlign())
+      LI.setAlignment(KnownAlign);
+  }
 
   // Replace GEP indices if possible.
   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI))
@@ -1445,11 +1449,13 @@
   if (combineStoreToValueType(*this, SI))
     return eraseInstFromFunction(SI);
 
-  // Attempt to improve the alignment.
-  const Align KnownAlign = getOrEnforceKnownAlignment(
-      Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
-  if (KnownAlign > SI.getAlign())
-    SI.setAlignment(KnownAlign);
+  if (!EnableInferAlignmentPass) {
+    // Attempt to improve the alignment.
+    const Align KnownAlign = getOrEnforceKnownAlignment(
+        Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
+    if (KnownAlign > SI.getAlign())
+      SI.setAlignment(KnownAlign);
+  }
 
   // Try to canonicalize the stored type.
   if (unpackStoreToAggregate(*this, SI))
diff --git a/llvm/test/Transforms/InstCombine/align-2d-gep.ll b/llvm/test/Transforms/InstCombine/align-2d-gep.ll
deleted file mode 100644
--- a/llvm/test/Transforms/InstCombine/align-2d-gep.ll
+++ /dev/null
@@ -1,65 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-
-; A multi-dimensional array in a nested loop doing vector stores that
-; aren't yet aligned. Instcombine can understand the addressing in the
-; Nice case to prove 16 byte alignment. In the Awkward case, the inner
-; array dimension is not even, so the stores to it won't always be
-; aligned. Instcombine should prove alignment in exactly one of the two
-; stores.
-
-@Nice = global [1001 x [20000 x double]] zeroinitializer, align 32
-@Awkward = global [1001 x [20001 x double]] zeroinitializer, align 32
-
-define void @foo() nounwind {
-; CHECK-LABEL: @foo(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    br label [[BB7_OUTER:%.*]]
-; CHECK:       bb7.outer:
-; CHECK-NEXT:    [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT26:%.*]], [[BB11:%.*]] ]
-; CHECK-NEXT:    br label [[BB1:%.*]]
-; CHECK:       bb1:
-; CHECK-NEXT:    [[J:%.*]] = phi i64 [ 0, [[BB7_OUTER]] ], [ [[INDVAR_NEXT:%.*]], [[BB1]] ]
-; CHECK-NEXT:    [[T4:%.*]] = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 [[I]], i64 [[J]]
-; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[T4]], align 16
-; CHECK-NEXT:    [[S4:%.*]] = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 [[I]], i64 [[J]]
-; CHECK-NEXT:    store <2 x double> zeroinitializer, ptr [[S4]], align 8
-; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[J]], 2
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 556
-; CHECK-NEXT:    br i1 [[EXITCOND]], label [[BB11]], label [[BB1]]
-; CHECK:       bb11:
-; CHECK-NEXT:    [[INDVAR_NEXT26]] = add i64 [[I]], 1
-; CHECK-NEXT:    [[EXITCOND27:%.*]] = icmp eq i64 [[INDVAR_NEXT26]], 991
-; CHECK-NEXT:    br i1 [[EXITCOND27]], label [[RETURN_SPLIT:%.*]], label [[BB7_OUTER]]
-; CHECK:       return.split:
-; CHECK-NEXT:    ret void
-;
-entry:
-  br label %bb7.outer
-
-bb7.outer:
-  %i = phi i64 [ 0, %entry ], [ %indvar.next26, %bb11 ]
-  br label %bb1
-
-bb1:
-  %j = phi i64 [ 0, %bb7.outer ], [ %indvar.next, %bb1 ]
-
-  %t4 = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 %i, i64 %j
-  store <2 x double> zeroinitializer, ptr %t4, align 8
-
-  %s4 = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 %i, i64 %j
-  store <2 x double> zeroinitializer, ptr %s4, align 8
-
-  %indvar.next = add i64 %j, 2
-  %exitcond = icmp eq i64 %indvar.next, 556
-  br i1 %exitcond, label %bb11, label %bb1
-
-bb11:
-  %indvar.next26 = add i64 %i, 1
-  %exitcond27 = icmp eq i64 %indvar.next26, 991
-  br i1 %exitcond27, label %return.split, label %bb7.outer
-
-return.split:
-  ret void
-}
diff --git a/llvm/test/Transforms/InstCombine/align-addr.ll b/llvm/test/Transforms/InstCombine/align-addr.ll
--- a/llvm/test/Transforms/InstCombine/align-addr.ll
+++ b/llvm/test/Transforms/InstCombine/align-addr.ll
@@ -2,9 +2,6 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 target datalayout = "E-p:64:64:64-p1:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
-; Instcombine should be able to prove vector alignment in the
-; presence of a few mild address computation tricks.
-
 define void @test0(ptr %b, i64 %n, i64 %u, i64 %y) nounwind {
 ; CHECK-LABEL: @test0(
 ; CHECK-NEXT:  entry:
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -7,8 +7,7 @@
 
 declare void @llvm.assume(i1) #1
 
-; Check that the alignment has been upgraded and that the assume has not
-; been removed:
+; Check that the assume has not been removed:
 
 define i32 @foo1(ptr %a) #0 {
 ; DEFAULT-LABEL: @foo1(
@@ -266,7 +265,7 @@
 
 define i1 @nonnull1(ptr %a) {
 ; CHECK-LABEL: @nonnull1(
-; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull [[META6:![0-9]+]], !noundef [[META6]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull !6, !noundef !6
 ; CHECK-NEXT:    tail call void @escape(ptr nonnull [[LOAD]])
 ; CHECK-NEXT:    ret i1 false
 ;
diff --git a/llvm/test/Transforms/InstCombine/assume_inevitable.ll b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
--- a/llvm/test/Transforms/InstCombine/assume_inevitable.ll
+++ b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
@@ -3,7 +3,6 @@
 
 ; Check that assume is propagated backwards through all
 ; operations that are `isGuaranteedToTransferExecutionToSuccessor`
-; (it should reach the load and mark it as `align 32`).
 define i32 @assume_inevitable(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @assume_inevitable(
 ; CHECK-NEXT:  entry:
diff --git a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
--- a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
@@ -97,25 +97,6 @@
     ret i16 %E
 }
 
-; Check that we improve the alignment information.
-; The base pointer is 16-byte aligned and we access the field at
-; an offset of 8-byte.
-; Every element in the @CallerInfos array is 16-byte aligned so
-; any access from the following gep is 8-byte aligned.
-%struct.CallerInfo = type { ptr, i32 }
-@CallerInfos = global [128 x %struct.CallerInfo] zeroinitializer, align 16
-
-define i32 @test_gep_in_struct(i64 %idx) {
-; CHECK-LABEL: @test_gep_in_struct(
-; CHECK-NEXT:    [[NS7:%.*]] = getelementptr inbounds [128 x %struct.CallerInfo], ptr @CallerInfos, i64 0, i64 [[IDX:%.*]], i32 1
-; CHECK-NEXT:    [[RES:%.*]] = load i32, ptr [[NS7]], align 8
-; CHECK-NEXT:    ret i32 [[RES]]
-;
-  %NS7 = getelementptr inbounds [128 x %struct.CallerInfo], ptr @CallerInfos, i64 0, i64 %idx, i32 1
-  %res = load i32, ptr %NS7, align 1
-  ret i32 %res
-}
-
 @g = external global i8
 @g2 = external global i8
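Note for reviewers (not part of the patch): a minimal sketch of the intended split after this change, under two assumptions taken from the InferAlignment patch this builds on — that the standalone pass is registered as `infer-alignment` in the new pass manager, and that `EnableInferAlignmentPass` is exposed as `-enable-infer-alignment-pass`. With the option enabled, InstCombine leaves an under-aligned access untouched and the standalone pass performs the upgrade that the deleted tests used to exercise. The `@G`/`@load_underaligned` names below are illustrative only.

; Illustrative sketch, not a test added by this patch.
; RUN: opt < %s -passes=instcombine -enable-infer-alignment-pass=true -S | FileCheck %s --check-prefix=IC
; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s --check-prefix=INFER

@G = global [4 x i32] zeroinitializer, align 16

define i32 @load_underaligned() {
; With alignment inference disabled in InstCombine, the load keeps align 1.
; IC:    %v = load i32, ptr @G, align 1
; The standalone pass can raise it to the known alignment of @G.
; INFER: %v = load i32, ptr @G, align 16
  %v = load i32, ptr @G, align 1
  ret i32 %v
}

Gating the old InstCombine code behind the option, rather than deleting it outright, keeps both behaviors available for comparison until the default is flipped.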