diff --git a/llvm/include/llvm/Analysis/InstructionSimplify.h b/llvm/include/llvm/Analysis/InstructionSimplify.h
--- a/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -248,8 +248,8 @@
                        const SimplifyQuery &Q);
 
 /// Given operands for a GetElementPtrInst, fold the result or return null.
-Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
-                       const SimplifyQuery &Q);
+Value *SimplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
+                       bool InBounds, const SimplifyQuery &Q);
 
 /// Given operands for an InsertValueInst, fold the result or return null.
 Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -71,7 +71,7 @@
 static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
 static Value *SimplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                                unsigned);
-static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, bool,
+static Value *SimplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
                               const SimplifyQuery &, unsigned);
 static Value *SimplifySelectInst(Value *, Value *, Value *,
                                  const SimplifyQuery &, unsigned);
@@ -4070,9 +4070,9 @@
                                                NewOps[1], Q, MaxRecurse - 1));
 
     if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
-      return PreventSelfSimplify(SimplifyGEPInst(GEP->getSourceElementType(),
-                                                 NewOps, GEP->isInBounds(), Q,
-                                                 MaxRecurse - 1));
+      return PreventSelfSimplify(SimplifyGEPInst(
+          GEP->getSourceElementType(), NewOps[0], makeArrayRef(NewOps).slice(1),
+          GEP->isInBounds(), Q, MaxRecurse - 1));
 
     if (isa<SelectInst>(I))
       return PreventSelfSimplify(
@@ -4430,46 +4430,52 @@
 
 /// Given operands for an GetElementPtrInst, see if we can fold the result.
 /// If not, this returns null.
-static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
+static Value *SimplifyGEPInst(Type *SrcTy, Value *Ptr,
+                              ArrayRef<Value *> Indices, bool InBounds,
                               const SimplifyQuery &Q, unsigned) {
   // The type of the GEP pointer operand.
   unsigned AS =
-      cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
+      cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
 
   // getelementptr P -> P.
-  if (Ops.size() == 1)
-    return Ops[0];
+  if (Indices.empty())
+    return Ptr;
 
   // Compute the (pointer) type returned by the GEP instruction.
-  Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
+  Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
   Type *GEPTy = PointerType::get(LastType, AS);
-  for (Value *Op : Ops) {
-    // If one of the operands is a vector, the result type is a vector of
-    // pointers. All vector operands must have the same number of elements.
-    if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
-      GEPTy = VectorType::get(GEPTy, VT->getElementCount());
-      break;
+  if (VectorType *VT = dyn_cast<VectorType>(Ptr->getType()))
+    GEPTy = VectorType::get(GEPTy, VT->getElementCount());
+  else {
+    for (Value *Op : Indices) {
+      // If one of the operands is a vector, the result type is a vector of
+      // pointers. All vector operands must have the same number of elements.
+      if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
+        GEPTy = VectorType::get(GEPTy, VT->getElementCount());
+        break;
+      }
     }
   }
 
   // getelementptr poison, idx -> poison
   // getelementptr baseptr, poison -> poison
-  if (any_of(Ops, [](const auto *V) { return isa<PoisonValue>(V); }))
+  if (isa<PoisonValue>(Ptr) ||
+      any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
     return PoisonValue::get(GEPTy);
 
-  if (Q.isUndefValue(Ops[0]))
+  if (Q.isUndefValue(Ptr))
     // If inbounds, we can choose an out-of-bounds pointer as a base pointer.
     return InBounds ? PoisonValue::get(GEPTy) : UndefValue::get(GEPTy);
 
   bool IsScalableVec =
-      isa<ScalableVectorType>(SrcTy) || any_of(Ops, [](const Value *V) {
+      isa<ScalableVectorType>(SrcTy) || any_of(Indices, [](const Value *V) {
        return isa<ScalableVectorType>(V->getType());
      });
 
-  if (Ops.size() == 2) {
+  if (Indices.size() == 1) {
     // getelementptr P, 0 -> P.
-    if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
-      return Ops[0];
+    if (match(Indices[0], m_Zero()) && Ptr->getType() == GEPTy)
+      return Ptr;
 
     Type *Ty = SrcTy;
     if (!IsScalableVec && Ty->isSized()) {
@@ -4477,37 +4483,37 @@
       uint64_t C;
       uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
       // getelementptr P, N -> P if P points to a type of zero size.
-      if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
-        return Ops[0];
+      if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
+        return Ptr;
 
       // The following transforms are only safe if the ptrtoint cast
       // doesn't truncate the pointers.
-      if (Ops[1]->getType()->getScalarSizeInBits() ==
+      if (Indices[0]->getType()->getScalarSizeInBits() ==
           Q.DL.getPointerSizeInBits(AS)) {
-        auto CanSimplify = [GEPTy, &P, V = Ops[0]]() -> bool {
+        auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
           return P->getType() == GEPTy &&
-                 getUnderlyingObject(P) == getUnderlyingObject(V);
+                 getUnderlyingObject(P) == getUnderlyingObject(Ptr);
         };
 
         // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
         if (TyAllocSize == 1 &&
-            match(Ops[1], m_Sub(m_PtrToInt(m_Value(P)),
-                                m_PtrToInt(m_Specific(Ops[0])))) &&
+            match(Indices[0],
+                  m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
             CanSimplify())
           return P;
 
         // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
         // size 1 << C.
-        if (match(Ops[1], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
-                                       m_PtrToInt(m_Specific(Ops[0]))),
-                                 m_ConstantInt(C))) &&
+        if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
+                                           m_PtrToInt(m_Specific(Ptr))),
+                                     m_ConstantInt(C))) &&
            TyAllocSize == 1ULL << C && CanSimplify())
          return P;
 
        // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
        // size C.
-        if (match(Ops[1], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
-                                       m_PtrToInt(m_Specific(Ops[0]))),
-                                 m_SpecificInt(TyAllocSize))) &&
+        if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
+                                           m_PtrToInt(m_Specific(Ptr))),
+                                     m_SpecificInt(TyAllocSize))) &&
             CanSimplify())
           return P;
       }
     }
@@ -4515,29 +4521,28 @@
   }
 
   if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
-      all_of(Ops.slice(1).drop_back(1),
+      all_of(Indices.drop_back(1),
             [](Value *Idx) { return match(Idx, m_Zero()); })) {
     unsigned IdxWidth =
-        Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
-    if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
+        Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
+    if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
       APInt BasePtrOffset(IdxWidth, 0);
       Value *StrippedBasePtr =
-          Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
-                                                            BasePtrOffset);
+          Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
 
       // Avoid creating inttoptr of zero here: While LLVMs treatment of
       // inttoptr is generally conservative, this particular case is folded to
       // a null pointer, which will have incorrect provenance.
 
       // gep (gep V, C), (sub 0, V) -> C
-      if (match(Ops.back(),
+      if (match(Indices.back(),
                 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
           !BasePtrOffset.isZero()) {
         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
         return ConstantExpr::getIntToPtr(CI, GEPTy);
       }
       // gep (gep V, C), (xor V, -1) -> C-1
-      if (match(Ops.back(),
+      if (match(Indices.back(),
                 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
           !BasePtrOffset.isOne()) {
         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
@@ -4547,17 +4552,18 @@
   }
 
   // Check to see if this is constant foldable.
-  if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
+  if (!isa<Constant>(Ptr) ||
+      !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
     return nullptr;
 
-  auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
-                                            Ops.slice(1), InBounds);
+  auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
+                                            InBounds);
   return ConstantFoldConstant(CE, Q.DL);
 }
 
-Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
-                             const SimplifyQuery &Q) {
-  return ::SimplifyGEPInst(SrcTy, Ops, InBounds, Q, RecursionLimit);
+Value *llvm::SimplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
+                             bool InBounds, const SimplifyQuery &Q) {
+  return ::SimplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
 }
 
 /// Given operands for an InsertValueInst, see if we can fold the result.
@@ -6273,8 +6279,9 @@
     break;
   case Instruction::GetElementPtr: {
     auto *GEPI = cast<GetElementPtrInst>(I);
-    Result = SimplifyGEPInst(GEPI->getSourceElementType(), NewOps,
-                             GEPI->isInBounds(), Q);
+    Result =
+        SimplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
+                        makeArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q);
     break;
   }
   case Instruction::InsertValue: {
diff --git a/llvm/lib/Analysis/PHITransAddr.cpp b/llvm/lib/Analysis/PHITransAddr.cpp
--- a/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/llvm/lib/Analysis/PHITransAddr.cpp
@@ -226,7 +226,8 @@
     return GEP;
 
   // Simplify the GEP to handle 'gep x, 0' -> x etc.
-  if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(), GEPOps,
+  if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(), GEPOps[0],
+                                 ArrayRef<Value *>(GEPOps).slice(1),
                                  GEP->isInBounds(), {DL, TLI, DT, AC})) {
     for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
       RemoveInstInputs(GEPOps[i], InstInputs);
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2018,11 +2018,12 @@
 }
 
 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
-  SmallVector<Value *, 8> Ops(GEP.operands());
+  Value *PtrOp = GEP.getOperand(0);
+  SmallVector<Value *, 8> Indices(GEP.indices());
   Type *GEPType = GEP.getType();
   Type *GEPEltType = GEP.getSourceElementType();
   bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
-  if (Value *V = SimplifyGEPInst(GEPEltType, Ops, GEP.isInBounds(),
+  if (Value *V = SimplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
                                  SQ.getWithInstruction(&GEP)))
     return replaceInstUsesWith(GEP, V);
 
@@ -2045,8 +2046,6 @@
     // undef elements to decrease demanded bits
   }
 
-  Value *PtrOp = GEP.getOperand(0);
-
   // Eliminate unneeded casts for indices, and replace indices which displace
   // by multiples of a zero size type with zero.
   bool MadeChange = false;
@@ -2460,10 +2459,9 @@
         // `setSourceElementType()` won't actually update the type of the
         // existing GEP Value. Causing issues if this Value is accessed when
         // constructing an AddrSpaceCastInst
-        Value *NGEP =
-            GEP.isInBounds()
-                ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, {Ops[1], Ops[2]})
-                : Builder.CreateGEP(SrcEltType, SrcOp, {Ops[1], Ops[2]});
+        Value *NGEP = GEP.isInBounds()
+                          ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, Indices)
+                          : Builder.CreateGEP(SrcEltType, SrcOp, Indices);
         NGEP->takeName(&GEP);
 
         // Preserve GEP address space to satisfy users
@@ -2547,8 +2545,7 @@
           DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize());
       if (BasePtrOffset.ule(AllocSize)) {
         return GetElementPtrInst::CreateInBounds(
-            GEP.getSourceElementType(), PtrOp, makeArrayRef(Ops).slice(1),
-            GEP.getName());
+            GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
       }
     }
   }
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1198,9 +1198,10 @@
       if (auto Simplified = checkExprResults(E, I, V))
         return Simplified;
     } else if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
-      Value *V = SimplifyGEPInst(GEPI->getSourceElementType(),
-                                 ArrayRef<Value *>(E->op_begin(), E->op_end()),
-                                 GEPI->isInBounds(), SQ);
+      Value *V =
+          SimplifyGEPInst(GEPI->getSourceElementType(), *E->op_begin(),
+                          makeArrayRef(std::next(E->op_begin()), E->op_end()),
+                          GEPI->isInBounds(), SQ);
       if (auto Simplified = checkExprResults(E, I, V))
         return Simplified;
     } else if (AllConstant) {
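
Note for downstream callers (not part of the patch itself): the pointer operand is now passed separately from the index list. A minimal migration sketch for an out-of-tree caller holding a GetElementPtrInst follows; the helper name trySimplifyGEP is illustrative, everything else is the API touched by this patch:

    // Hypothetical out-of-tree caller, updated for the split
    // Ptr/Indices signature introduced by this patch.
    static Value *trySimplifyGEP(GetElementPtrInst &GEP, const SimplifyQuery &Q) {
      Value *Ptr = GEP.getPointerOperand();
      SmallVector<Value *, 8> Indices(GEP.indices());
      return SimplifyGEPInst(GEP.getSourceElementType(), Ptr, Indices,
                             GEP.isInBounds(), Q);
    }

Callers that still hold a flat operand array (pointer at index 0) can instead split at the call site, as the patch does with NewOps[0] and makeArrayRef(NewOps).slice(1).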