Index: llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
===================================================================
--- llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/LogicCombine.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
@@ -852,6 +853,84 @@
   return MadeChange;
 }
 
+// Return the minimum GEP stride, the greatest common divisor of the element
+// type sizes along the GEP chain (cf. Bézout's identity). For now this
+// ignores the constantness of the indices and struct member offsets.
+static uint64_t getMinimumGEPStride(Value *PtrOp, const DataLayout &DL) {
+  auto *GEP = dyn_cast<GEPOperator>(PtrOp);
+  if (!GEP)
+    return 1;
+
+  // Euclid's algorithm for the greatest common divisor.
+  auto Gcd = [](uint64_t A, uint64_t B) {
+    while (B != 0) {
+      uint64_t R = A % B;
+      A = B;
+      B = R;
+    }
+    return A;
+  };
+
+  uint64_t Stride = DL.getTypeStoreSize(GEP->getSourceElementType());
+  Value *V = GEP;
+  while (auto *CurGEP = dyn_cast<GEPOperator>(V)) {
+    Stride = Gcd(Stride, DL.getTypeStoreSize(CurGEP->getResultElementType()));
+    V = CurGEP->getPointerOperand();
+  }
+  return Stride;
+}
+
+/// If C is a constant patterned array and all valid loaded results for the
+/// given alignment are equal to the same constant, fold the load to that
+/// constant.
+static bool foldPatternedLoads(Instruction &I, const DataLayout &DL) {
+  auto *LI = dyn_cast<LoadInst>(&I);
+  if (!LI)
+    return false;
+
+  auto *PtrOp = LI->getPointerOperand();
+  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
+  if (!GV || !GV->isConstant() ||
+      (!GV->hasDefinitiveInitializer() && !GV->hasUniqueInitializer()))
+    return false;
+
+  uint64_t LoadAlign = LI->getAlign().value();
+  if (LoadAlign > GV->getAlign().valueOrOne().value())
+    return false;
+
+  Constant *C = GV->getInitializer();
+  Type *LoadTy = LI->getType();
+  unsigned GVSize = DL.getTypeStoreSize(C->getType());
+
+  // Bail out for large initializers in excess of 64K to avoid allocating
+  // too much memory.
+  if (!GVSize || UINT16_MAX < GVSize)
+    return false;
+
+  unsigned LoadSize = LoadTy->getScalarSizeInBits() / 8;
+  Constant *Ca = ConstantFoldLoadFromConst(
+      C, LoadTy, APInt(DL.getTypeSizeInBits(C->getType()), 0), DL);
+  if (!Ca)
+    return false;
+
+  // Every possible offset is a multiple of the minimum GEP stride, and every
+  // valid offset is a multiple of the load alignment, so checking only the
+  // multiples of the larger of the two is enough to prove that all loaded
+  // results are equal.
+  uint64_t Stride = getMinimumGEPStride(PtrOp, DL);
+  Stride = Stride < LoadAlign ? LoadAlign : Stride;
+
+  for (uint64_t ByteOffset = Stride, E = GVSize - LoadSize; ByteOffset <= E;
+       ByteOffset += Stride)
+    if (Ca != ConstantFoldLoadFromConst(
+                  C, LoadTy,
+                  APInt(DL.getTypeSizeInBits(C->getType()), ByteOffset), DL))
+      return false;
+
+  I.replaceAllUsesWith(Ca);
+  return true;
+}
+
 /// This is the entry point for folds that could be implemented in regular
 /// InstCombine, but they are separated because they are not expected to
 /// occur frequently and/or have more than a constant-length pattern match.
@@ -878,6 +957,7 @@
   MadeChange |= tryToFPToSat(I, TTI);
   MadeChange |= tryToRecognizeTableBasedCttz(I);
   MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA);
+  MadeChange |= foldPatternedLoads(I, DL);
   // NOTE: This function introduces erasing of the instruction `I`, so it
   // needs to be called at the end of this sequence, otherwise we may make
   // bugs.
Index: llvm/test/Transforms/AggressiveInstCombine/patterned-load.ll
===================================================================
--- llvm/test/Transforms/AggressiveInstCombine/patterned-load.ll
+++ llvm/test/Transforms/AggressiveInstCombine/patterned-load.ll
@@ -7,9 +7,7 @@
 
 define i8 @gep_load_i8_align2(i64 %idx){
 ; CHECK-LABEL: @gep_load_i8_align2(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr @constarray, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 2
-; CHECK-NEXT:    ret i8 [[TMP2]]
+; CHECK-NEXT:    ret i8 1
 ;
   %1 = getelementptr inbounds i8, ptr @constarray, i64 %idx
   %2 = load i8, ptr %1, align 2
@@ -29,10 +27,11 @@
 }
 
 define i32 @gep_i32_load_i32_align4(i64 %idx){
-; CHECK-LABEL: @gep_i32_load_i32_align4(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr @constarray, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT:    ret i32 [[TMP2]]
+; LE-LABEL: @gep_i32_load_i32_align4(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i32_load_i32_align4(
+; BE-NEXT:    ret i32 16777472
 ;
   %1 = getelementptr inbounds i32, ptr @constarray, i64 %idx
   %2 = load i32, ptr %1, align 4
@@ -40,10 +39,11 @@
 }
 
 define i32 @gep_i32_load_i32_align4_packedstruct(i64 %idx){
-; CHECK-LABEL: @gep_i32_load_i32_align4_packedstruct(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr @constpackedstruct, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT:    ret i32 [[TMP2]]
+; LE-LABEL: @gep_i32_load_i32_align4_packedstruct(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i32_load_i32_align4_packedstruct(
+; BE-NEXT:    ret i32 16777472
 ;
   %1 = getelementptr inbounds i32, ptr @constpackedstruct, i64 %idx
   %2 = load i32, ptr %1, align 4
@@ -75,15 +75,13 @@
 }
 
 define i32 @gep_i16_load_i32_align1(i64 %idx){
-; CHECK-LABEL: @gep_i16_load_i32_align1(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i16, ptr @constarray, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 1
-; CHECK-NEXT:    ret i32 [[TMP2]]
+; LE-LABEL: @gep_i16_load_i32_align1(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i16_load_i32_align1(
+; BE-NEXT:    ret i32 16777472
 ;
   %1 = getelementptr inbounds i16, ptr @constarray, i64 %idx
   %2 = load i32, ptr %1, align 1
   ret i32 %2
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; BE: {{.*}}
-; LE: {{.*}}
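Reviewer sketch (not part of the patch): the reason it is sound to probe only multiples of the minimum GEP stride is Bézout's identity. Any offset reachable through a chain of GEPs with variable indices is an integer combination of the element store sizes involved, and every such combination is a multiple of their greatest common divisor. The standalone C++ program below illustrates this for an assumed chain of i32 and i16 GEPs (store sizes 4 and 2); the sizes, names, and loop bounds are illustrative only and do not come from the patch.

// Illustrative sketch (assumed sizes): offsets of the form 4*M + 2*N, as
// produced by a chain like "gep i16, (gep i32, @g, M), N", are all multiples
// of gcd(4, 2) == 2, which is the quantity getMinimumGEPStride computes.
#include <cstdint>
#include <iostream>
#include <numeric>

int main() {
  const uint64_t SizeA = 4, SizeB = 2;            // assumed element store sizes
  const uint64_t Stride = std::gcd(SizeA, SizeB); // minimum GEP stride

  bool AllMultiples = true;
  for (uint64_t M = 0; M < 16; ++M)
    for (uint64_t N = 0; N < 16; ++N)
      AllMultiples &= (SizeA * M + SizeB * N) % Stride == 0;

  std::cout << "stride = " << Stride << ", all reachable offsets are multiples: "
            << std::boolalpha << AllMultiples << '\n';
  return 0;
}

For the tests above, the expected little-endian value 65537 (0x00010001) and big-endian value 16777472 (0x01000100) both correspond to the repeating byte pattern 01 00 in @constarray, so every stride-aligned load yields the same constant and the fold applies.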