Index: llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
===================================================================
--- llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -824,6 +825,103 @@
   return true;
 }
 
+/// If C is a constant patterned array and all valid loaded results for the
+/// given alignment are equal to the same constant, fold the load to that
+/// constant.
+static bool foldPatternedLoads(Instruction &I, const DataLayout &DL) {
+  auto *LI = dyn_cast<LoadInst>(&I);
+  if (!LI || LI->isVolatile())
+    return false;
+
+  // We can only fold the load if it is from a constant global with a
+  // definitive initializer. Skip expensive logic if this is not the case.
+  auto *PtrOp = LI->getOperand(0);
+  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
+  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
+    return false;
+
+  // Check whether the pointer arrives back at the global variable.
+  // FIXME: This and the matching check inside the loop below seem too
+  // pattern specific. Is there a better way to decide that the pointer
+  // arrives back at the global variable?
+  if (auto *Call = dyn_cast<CallBase>(PtrOp))
+    if (match(Call, m_Intrinsic<Intrinsic::ptrmask>()))
+      return false;
+
+  Type *LoadTy = LI->getType();
+  Constant *C = GV->getInitializer();
+
+  // Bail for large initializers in excess of 4K to avoid allocating
+  // too much memory.
+  uint64_t GVSize = DL.getTypeAllocSize(C->getType());
+  if (!GVSize || 4096 < GVSize)
+    return false;
+
+  // Calculate the constant offset and the minimum GEP stride.
+  unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
+  APInt ConstOffset(BW, 0);
+  std::optional<APInt> Stride;
+  if (auto *GEP = dyn_cast<GEPOperator>(PtrOp)) {
+    Stride = APInt(BW, DL.getTypeAllocSize(GEP->getSourceElementType()));
+    Value *PtrOpV = GEP;
+
+    // If the GEP is not inbounds, reaching the global variable alone does
+    // not let us reason about the set of possible results; we need a chain
+    // of consecutive inbounds GEPs.
+    if (!GEP->isInBounds())
+      return false;
+
+    // Compute the minimum GEP stride: the greatest common divisor of the
+    // scales of the consecutive GEPs' variable indices (c.f. Bézout's
+    // identity).
+    while (auto *GEP = dyn_cast<GEPOperator>(PtrOpV)) {
+      if (!GEP->isInBounds())
+        return false;
+      MapVector<Value *, APInt> VarOffsets;
+      if (!GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
+        return false;
+
+      for (auto [_, IndexTypeSize] : VarOffsets)
+        Stride = APIntOps::GreatestCommonDivisor(
+            *Stride, APInt(BW, IndexTypeSize.getZExtValue()));
+
+      PtrOpV = GEP->getPointerOperand();
+      // Check whether the pointer arrives back at the global variable.
+      if (auto *Call = dyn_cast<CallBase>(PtrOpV))
+        if (match(Call, m_Intrinsic<Intrinsic::ptrmask>()))
+          return false;
+    }
+
+    // To account for signed GEP indices, the constant offset becomes the
+    // remainder of division by the minimum GEP stride.
+    ConstOffset = ConstOffset.srem(*Stride);
+  }
+
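+  // For example, given a (hypothetical) pointer computed as
+  //   %p = getelementptr inbounds [64 x i16], ptr @g, i64 0, i64 %x
+  //   %q = getelementptr inbounds i64, ptr %p, i64 %y
+  // the variable indices are scaled by 2 and 8 bytes, so the minimum stride
+  // is GCD(2, 8) = 2: only even byte offsets from @g can be addressed, and
+  // any accumulated constant offset is likewise reduced modulo 2.
+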
+  // Any possible offset is a multiple of the minimum GEP stride, and any
+  // valid offset is a multiple of the load alignment, so checking only
+  // multiples of the larger of the two is sufficient to establish that all
+  // loaded results are equal.
+  if (auto LA = LI->getAlign();
+      LA.value() <= GV->getAlign().valueOrOne().value() &&
+      (!Stride || Stride->getZExtValue() < LA.value())) {
+    ConstOffset = APInt(BW, 0);
+    Stride = APInt(BW, LA.value());
+  }
+
+  if (!Stride)
+    return false;
+
+  Constant *Ca = ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL);
+  if (!Ca)
+    return false;
+
+  unsigned E = GVSize - DL.getTypeAllocSize(LoadTy);
+  for (; ConstOffset.getZExtValue() <= E; ConstOffset += *Stride)
+    if (Ca != ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL))
+      return false;
+
+  I.replaceAllUsesWith(Ca);
+
+  return true;
+}
+
 /// This is the entry point for folds that could be implemented in regular
 /// InstCombine, but they are separated because they are not expected to
 /// occur frequently and/or have more than a constant-length pattern match.
@@ -850,6 +948,7 @@
       MadeChange |= tryToFPToSat(I, TTI);
       MadeChange |= tryToRecognizeTableBasedCttz(I);
       MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA);
+      MadeChange |= foldPatternedLoads(I, DL);
       // NOTE: This function introduces erasing of the instruction `I`, so it
       // needs to be called at the end of this sequence, otherwise we may make
       // bugs.
Index: llvm/test/Transforms/AggressiveInstCombine/patterned-load.ll
===================================================================
--- llvm/test/Transforms/AggressiveInstCombine/patterned-load.ll
+++ llvm/test/Transforms/AggressiveInstCombine/patterned-load.ll
@@ -14,9 +14,7 @@
 
 define i8 @gep_load_i8_align2(i64 %idx){
 ; CHECK-LABEL: @gep_load_i8_align2(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr @constarray1, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 2
-; CHECK-NEXT:    ret i8 [[TMP2]]
+; CHECK-NEXT:    ret i8 1
 ;
   %1 = getelementptr inbounds i8, ptr @constarray1, i64 %idx
   %2 = load i8, ptr %1, align 2
@@ -76,10 +74,11 @@
 }
 
 define i32 @gep_i16_load_i32_align1(i64 %idx){
-; CHECK-LABEL: @gep_i16_load_i32_align1(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i16, ptr @constarray1, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 1
-; CHECK-NEXT:    ret i32 [[TMP2]]
+; LE-LABEL: @gep_i16_load_i32_align1(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i16_load_i32_align1(
+; BE-NEXT:    ret i32 16777472
 ;
   %1 = getelementptr inbounds i16, ptr @constarray1, i64 %idx
   %2 = load i32, ptr %1, align 1
@@ -87,10 +86,11 @@
 }
 
 define i32 @gep_i32_load_i32_align8(i64 %idx){
-; CHECK-LABEL: @gep_i32_load_i32_align8(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr @constarray1, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 8
-; CHECK-NEXT:    ret i32 [[TMP2]]
+; LE-LABEL: @gep_i32_load_i32_align8(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i32_load_i32_align8(
+; BE-NEXT:    ret i32 16777472
 ;
   %1 = getelementptr inbounds i32, ptr @constarray1, i64 %idx
   %2 = load i32, ptr %1, align 8
@@ -98,11 +98,11 @@
 }
 
 define i32 @gep_i32_load_i32_with_const_offset(i64 %idx){
-; CHECK-LABEL: @gep_i32_load_i32_with_const_offset(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i16, ptr @constarray2, i64 1
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; LE-LABEL: @gep_i32_load_i32_with_const_offset(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i32_load_i32_with_const_offset(
+; BE-NEXT:    ret i32 16777472
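+;
+; The two expected values are the little- and big-endian readings of the same
+; byte pattern 01 00 01 00 (0x00010001 = 65537 and 0x01000100 = 16777472).
+; Here the constant i16 GEP contributes a byte offset of 2 and the variable
+; i32 GEP a stride of 4, so the fold verifies that offsets 2, 6, 10, ... all
+; load the same value.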
 ;
   %1 = getelementptr inbounds i16, ptr @constarray2, i64 1
   %2 = getelementptr inbounds i32, ptr %1, i64 %idx
@@ -112,10 +112,7 @@
 
 define i32 @gep_i32_load_i32_const_ptr_array(i64 %idx){
 ; CHECK-LABEL: @gep_i32_load_i32_const_ptr_array(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds ptr, ptr @constptrarray, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    ret i32 42
 ;
   %1 = getelementptr inbounds ptr, ptr @constptrarray, i64 %idx
   %2 = load ptr, ptr %1, align 4
@@ -124,10 +121,11 @@
 }
 
 define i32 @gep_i32_load_i32_align4_packedstruct(i64 %idx){
-; CHECK-LABEL: @gep_i32_load_i32_align4_packedstruct(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr @constpackedstruct, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT:    ret i32 [[TMP2]]
+; LE-LABEL: @gep_i32_load_i32_align4_packedstruct(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i32_load_i32_align4_packedstruct(
+; BE-NEXT:    ret i32 16777472
 ;
   %1 = getelementptr inbounds i32, ptr @constpackedstruct, i64 %idx
   %2 = load i32, ptr %1, align 4
@@ -147,17 +145,14 @@
 }
 
 define i32 @gep_i32_load_i32_align4_struct_with_const_offset(i64 %idx){
-; CHECK-LABEL: @gep_i32_load_i32_align4_struct_with_const_offset(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i16, ptr @conststruct, i64 1
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; LE-LABEL: @gep_i32_load_i32_align4_struct_with_const_offset(
+; LE-NEXT:    ret i32 65537
+;
+; BE-LABEL: @gep_i32_load_i32_align4_struct_with_const_offset(
+; BE-NEXT:    ret i32 16777472
 ;
   %1 = getelementptr inbounds i16, ptr @conststruct, i64 1
   %2 = getelementptr inbounds i32, ptr %1, i64 %idx
   %3 = load i32, ptr %2, align 4
   ret i32 %3
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; BE: {{.*}}
-; LE: {{.*}}