diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -693,6 +693,9 @@
   /// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
   /// accumulated even if the GEP is not "inbounds".
   ///
+  /// If \p AllowInvariantGroup is true then this method also looks through
+  /// strip.invariant.group and launder.invariant.group intrinsics.
+  ///
   /// If \p ExternalAnalysis is provided it will be used to calculate a offset
   /// when a operand of GEP is not constant.
   /// For example, for a value \p ExternalAnalysis might try to calculate a
@@ -708,13 +711,15 @@
   /// is unchanged.
   const Value *stripAndAccumulateConstantOffsets(
       const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+      bool AllowInvariantGroup = false,
       function_ref<bool(Value &, APInt &)> ExternalAnalysis = nullptr) const;
   Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
-                                           bool AllowNonInbounds) {
+                                           bool AllowNonInbounds,
+                                           bool AllowInvariantGroup = false) {
     return const_cast<Value *>(
         static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
-            DL, Offset, AllowNonInbounds));
+            DL, Offset, AllowNonInbounds, AllowInvariantGroup));
   }
 
   /// This is a wrapper around stripAndAccumulateConstantOffsets with the
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6089,73 +6089,27 @@
   return ::SimplifyFreezeInst(Op0, Q);
 }
 
-static Constant *ConstructLoadOperandConstant(Value *Op) {
-  SmallVector<Value *, 4> Worklist;
-  // Invalid IR in unreachable code may contain self-referential values. Don't
-  // infinitely loop.
-  SmallPtrSet<Value *, 4> Visited;
-  Worklist.push_back(Op);
-  while (true) {
-    Value *CurOp = Worklist.back();
-    if (!Visited.insert(CurOp).second)
-      return nullptr;
-    if (isa<Constant>(CurOp))
-      break;
-    if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
-      Worklist.push_back(BC->getOperand(0));
-    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
-      for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
-        if (!isa<Constant>(GEP->getOperand(I)))
-          return nullptr;
-      }
-      Worklist.push_back(GEP->getOperand(0));
-    } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
-      if (II->isLaunderOrStripInvariantGroup())
-        Worklist.push_back(II->getOperand(0));
-      else
-        return nullptr;
-    } else {
-      return nullptr;
-    }
-  }
-
-  Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
-  while (!Worklist.empty()) {
-    Value *CurOp = Worklist.pop_back_val();
-    if (isa<BitCastOperator>(CurOp)) {
-      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
-    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
-      SmallVector<Constant *> Idxs;
-      Idxs.reserve(GEP->getNumOperands() - 1);
-      for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-        Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
-      }
-      NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
-                                             Idxs, GEP->isInBounds(),
-                                             GEP->getInRangeIndex());
-    } else {
-      assert(isa<IntrinsicInst>(CurOp) &&
-             cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
-             "expected invariant group intrinsic");
-      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
-    }
-  }
-  return NewOp;
-}
-
 static Value *SimplifyLoadInst(LoadInst *LI, Value *PtrOp,
                                const SimplifyQuery &Q) {
   if (LI->isVolatile())
     return nullptr;
 
-  // Try to make the load operand a constant, specifically handle
-  // invariant.group intrinsics.
+  APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
   auto *PtrOpC = dyn_cast<Constant>(PtrOp);
-  if (!PtrOpC)
-    PtrOpC = ConstructLoadOperandConstant(PtrOp);
+  // Try to convert operand into a constant by stripping offsets while looking
+  // through invariant.group intrinsics.
+  // Don't bother if the underlying object is not constant, as calculating GEP
+  // offsets is expensive.
+  if (!PtrOpC && isa<Constant>(getUnderlyingObject(PtrOp))) {
+    PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
+        Q.DL, Offset, /* AllowNonInbounds */ true,
+        /* AllowInvariantGroup */ true);
+    // Index size may have changed due to address space casts.
+    Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
+    PtrOpC = dyn_cast<Constant>(PtrOp);
+  }
 
   if (PtrOpC)
-    return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
-
+    return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Offset, Q.DL);
   return nullptr;
 }
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -706,6 +706,7 @@
 
 const Value *Value::stripAndAccumulateConstantOffsets(
     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+    bool AllowInvariantGroup,
     function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
   if (!getType()->isPtrOrPtrVectorTy())
     return this;
@@ -765,6 +766,8 @@
     } else if (const auto *Call = dyn_cast<CallBase>(V)) {
       if (const Value *RV = Call->getReturnedArgOperand())
         V = RV;
+      if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
+        V = Call->getArgOperand(0);
     }
     assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
   } while (Visited.insert(V).second);
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -410,6 +410,7 @@
   };
 
   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
+                                                /* AllowInvariantGroup */ false,
                                                 AttributorAnalysis);
 }