diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -112,7 +112,7 @@ Type *RetTy = nullptr; Intrinsic::ID IID; SmallVector<Type *, 4> ParamTys; - SmallVector<Value *, 4> Arguments; + SmallVector<const Value *, 4> Arguments; FastMathFlags FMF; unsigned VF = 1; // If ScalarizationCost is UINT_MAX, the cost of scalarizing the @@ -146,7 +146,7 @@ ArrayRef<Type *> Tys); IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy, - ArrayRef<Value *> Args); + ArrayRef<const Value *> Args); Intrinsic::ID getID() const { return IID; } const IntrinsicInst *getInst() const { return II; } @@ -154,7 +154,7 @@ unsigned getVectorFactor() const { return VF; } FastMathFlags getFlags() const { return FMF; } unsigned getScalarizationCost() const { return ScalarizationCost; } - const SmallVectorImpl<Value *> &getArgs() const { return Arguments; } + const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; } const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; } bool isTypeBasedOnly() const { @@ -951,7 +951,7 @@ unsigned getMaxInterleaveFactor(unsigned VF) const; /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2. - static OperandValueKind getOperandInfo(Value *V, + static OperandValueKind getOperandInfo(const Value *V, OperandValueProperties &OpProps); /// This is an approximation of reciprocal throughput of a math/logic op. @@ -1037,9 +1037,10 @@ /// \p I - the optional original context instruction, if one exists, e.g. 
the /// load/store to transform or the call to the gather/scatter intrinsic int getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, - const Instruction *I = nullptr) const; + unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, + unsigned Alignment, + TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, + const Instruction *I = nullptr) const; /// \return The cost of the interleaved memory operation. /// \p Opcode is the memory operation code @@ -1429,10 +1430,11 @@ unsigned Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) = 0; - virtual int getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) = 0; + virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, + const Value *Ptr, bool VariableMask, + unsigned Alignment, + TTI::TargetCostKind CostKind, + const Instruction *I = nullptr) = 0; virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, @@ -1848,10 +1850,10 @@ return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind); } - int getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) override { + int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, + bool VariableMask, unsigned Alignment, + TTI::TargetCostKind CostKind, + const Instruction *I = nullptr) override { return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I); } diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h --- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ 
-470,10 +470,11 @@ return 1; } - unsigned getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) { + unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, + const Value *Ptr, bool VariableMask, + unsigned Alignment, + TTI::TargetCostKind CostKind, + const Instruction *I = nullptr) { return 1; } diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -1139,14 +1139,14 @@ : 1); assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type"); const IntrinsicInst *I = ICA.getInst(); - const SmallVectorImpl<Value *> &Args = ICA.getArgs(); + const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); FastMathFlags FMF = ICA.getFlags(); switch (IID) { default: { // Assume that we need to scalarize this intrinsic. SmallVector<Type *, 4> Types; - for (Value *Op : Args) { + for (const Value *Op : Args) { Type *OpTy = Op->getType(); assert(VF == 1 || !OpTy->isVectorTy()); Types.push_back(VF == 1 ? 
OpTy : FixedVectorType::get(OpTy, VF)); @@ -1173,7 +1173,7 @@ } case Intrinsic::masked_scatter: { assert(VF == 1 && "Can't vectorize types here."); - Value *Mask = Args[3]; + const Value *Mask = Args[3]; bool VarMask = !isa<Constant>(Mask); unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue(); return ConcreteTTI->getGatherScatterOpCost(Instruction::Store, @@ -1183,7 +1183,7 @@ } case Intrinsic::masked_gather: { assert(VF == 1 && "Can't vectorize types here."); - Value *Mask = Args[2]; + const Value *Mask = Args[2]; bool VarMask = !isa<Constant>(Mask); unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue(); return ConcreteTTI->getGatherScatterOpCost( @@ -1207,9 +1207,9 @@ } case Intrinsic::fshl: case Intrinsic::fshr: { - Value *X = Args[0]; - Value *Y = Args[1]; - Value *Z = Args[2]; + const Value *X = Args[0]; + const Value *Y = Args[1]; + const Value *Z = Args[2]; TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW; TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX); TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY); diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -68,7 +68,7 @@ const CallBase &CI) : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id) { - if (auto *FPMO = dyn_cast<FPMathOperator>(&CI)) + if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI)) FMF = FPMO->getFastMathFlags(); FunctionType *FTy = @@ -96,7 +96,7 @@ unsigned ScalarCost) : RetTy(CI.getType()), IID(Id), VF(Factor), ScalarizationCost(ScalarCost) { - if (auto *FPMO = dyn_cast<FPMathOperator>(&CI)) + if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI)) FMF = FPMO->getFastMathFlags(); Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end()); @@ -136,8 +136,8 @@ } IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty, - ArrayRef<Value *> Args) : - RetTy(Ty), IID(Id) { + ArrayRef<const Value *> Args) + : RetTy(Ty), IID(Id) { Arguments.insert(Arguments.begin(), Args.begin(), Args.end()); 
ParamTys.reserve(Arguments.size()); @@ -633,11 +633,12 @@ } TargetTransformInfo::OperandValueKind -TargetTransformInfo::getOperandInfo(Value *V, OperandValueProperties &OpProps) { +TargetTransformInfo::getOperandInfo(const Value *V, + OperandValueProperties &OpProps) { OperandValueKind OpInfo = OK_AnyValue; OpProps = OP_None; - if (auto *CI = dyn_cast<ConstantInt>(V)) { + if (const auto *CI = dyn_cast<ConstantInt>(V)) { if (CI->getValue().isPowerOf2()) OpProps = OP_PowerOf2; return OK_UniformConstantValue; @@ -646,7 +647,7 @@ // A broadcast shuffle creates a uniform value. // TODO: Add support for non-zero index broadcasts. // TODO: Add support for different source vector width. - if (auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V)) + if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V)) if (ShuffleInst->isZeroEltSplat()) OpInfo = OK_UniformValue; @@ -661,7 +662,7 @@ if (auto *CI = dyn_cast<ConstantInt>(Splat)) if (CI->getValue().isPowerOf2()) OpProps = OP_PowerOf2; - } else if (auto *CDS = dyn_cast<ConstantDataVector>(V)) { + } else if (const auto *CDS = dyn_cast<ConstantDataVector>(V)) { OpProps = OP_PowerOf2; for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) { if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) @@ -767,10 +768,12 @@ return Cost; } -int TargetTransformInfo::getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind, - const Instruction *I) const { +int TargetTransformInfo::getGatherScatterOpCost(unsigned Opcode, Type *DataTy, + const Value *Ptr, + bool VariableMask, + unsigned Alignment, + TTI::TargetCostKind CostKind, + const Instruction *I) const { int Cost = TTIImpl->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I); assert(Cost >= 0 && "TTI should not produce negative costs!"); diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h @@ -231,10 +231,11 @@ bool UseMaskForCond 
= false, bool UseMaskForGaps = false); - unsigned getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr); + unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, + const Value *Ptr, bool VariableMask, + unsigned Alignment, + TTI::TargetCostKind CostKind, + const Instruction *I = nullptr); bool isLoweredToCall(const Function *F); bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -953,7 +953,7 @@ } unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy, - Value *Ptr, bool VariableMask, + const Value *Ptr, bool VariableMask, unsigned Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { @@ -1032,9 +1032,9 @@ if (ExtSize != 8 && ExtSize != 16) return ScalarCost; - if (auto BC = dyn_cast<BitCastInst>(Ptr)) + if (const auto *BC = dyn_cast<BitCastInst>(Ptr)) Ptr = BC->getOperand(0); - if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { + if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { if (GEP->getNumOperands() != 2) return ScalarCost; unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType()); @@ -1042,7 +1042,7 @@ if (Scale != 1 && Scale * 8 != ExtSize) return ScalarCost; // And we need to zext (not sext) the indexes from a small enough type. 
- if (auto ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) { + if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) { if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize) return VectorCost; } diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h @@ -120,8 +120,9 @@ TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency); unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp); - unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr, - bool VariableMask, unsigned Alignment, + unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, + const Value *Ptr, bool VariableMask, + unsigned Alignment, TTI::TargetCostKind CostKind, const Instruction *I); unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp @@ -213,9 +213,8 @@ } unsigned HexagonTTIImpl::getGatherScatterOpCost( - unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind, - const Instruction *I) { + unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, + unsigned Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I); } diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h --- a/llvm/lib/Target/X86/X86TargetTransformInfo.h +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h @@ -144,7 +144,7 @@ int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, unsigned AddressSpace, 
TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency); - int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr, + int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, unsigned Alignment, TTI::TargetCostKind CostKind, const Instruction *I); @@ -230,7 +230,7 @@ private: int getGSScalarCost(unsigned Opcode, Type *DataTy, bool VariableMask, unsigned Alignment, unsigned AddressSpace); - int getGSVectorCost(unsigned Opcode, Type *DataTy, Value *Ptr, + int getGSVectorCost(unsigned Opcode, Type *DataTy, const Value *Ptr, unsigned Alignment, unsigned AddressSpace); /// @} diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -2771,7 +2771,7 @@ Intrinsic::ID IID = ICA.getID(); Type *RetTy = ICA.getReturnType(); - const SmallVectorImpl<Value *> &Args = ICA.getArgs(); + const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); unsigned ISD = ISD::DELETED_NODE; switch (IID) { default: @@ -3849,7 +3849,7 @@ } // Return an average cost of Gather / Scatter instruction, maybe improved later -int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr, +int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, const Value *Ptr, unsigned Alignment, unsigned AddressSpace) { assert(isa<FixedVectorType>(SrcVTy) && "Unexpected type in getGSVectorCost"); @@ -3860,14 +3860,14 @@ // operation will use 16 x 64 indices which do not fit in a zmm and needs // to split. Also check that the base pointer is the same for all lanes, // and that there's at most one variable index. 
- auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) { + auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) { unsigned IndexSize = DL.getPointerSizeInBits(); - GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); + const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); if (IndexSize < 64 || !GEP) return IndexSize; unsigned NumOfVarIndices = 0; - Value *Ptrs = GEP->getPointerOperand(); + const Value *Ptrs = GEP->getPointerOperand(); if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs)) return IndexSize; for (unsigned i = 1; i < GEP->getNumOperands(); ++i) { @@ -3884,7 +3884,6 @@ return (unsigned)32; }; - // Trying to reduce IndexSize to 32 bits for vector 16. // By default the IndexSize is equal to pointer size. unsigned IndexSize = (ST->hasAVX512() && VF >= 16) @@ -3963,10 +3962,11 @@ } /// Calculate the cost of Gather / Scatter operation -int X86TTIImpl::getGatherScatterOpCost( - unsigned Opcode, Type *SrcVTy, Value *Ptr, bool VariableMask, - unsigned Alignment, TTI::TargetCostKind CostKind, - const Instruction *I = nullptr) { +int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy, + const Value *Ptr, bool VariableMask, + unsigned Alignment, + TTI::TargetCostKind CostKind, + const Instruction *I = nullptr) { if (CostKind != TTI::TCK_RecipThroughput) return 1; diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp --- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -1553,9 +1553,9 @@ // %inc = add nsw %i.0, 1 // br i1 %tobool - Value *Args[] = - {InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext()) : ConstantInt::getFalse(InitX->getContext())}; + const Value *Args[] = { + InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext()) : ConstantInt::getFalse(InitX->getContext())}; // @llvm.dbg doesn't count as they have no semantic effect. 
auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug(); diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -5932,7 +5932,7 @@ Type *ValTy = getMemInstValueType(I); auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); const Align Alignment = getLoadStoreAlignment(I); - Value *Ptr = getLoadStorePointerOperand(I); + const Value *Ptr = getLoadStorePointerOperand(I); return TTI.getAddressComputationCost(VectorTy) + TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,