diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1106,8 +1106,9 @@
   /// \return The expected cost of control-flow related instructions such as
   /// Phi, Ret, Br.
-  int getCFInstrCost(unsigned Opcode,
-                     TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
+  InstructionCost
+  getCFInstrCost(unsigned Opcode,
+                 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
 
   /// \returns The expected cost of compare and select instructions. If there
   /// is an existing instruction that holds Opcode, it may be passed in the
@@ -1578,8 +1579,8 @@
   virtual InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                                    VectorType *VecTy,
                                                    unsigned Index) = 0;
-  virtual int getCFInstrCost(unsigned Opcode,
-                             TTI::TargetCostKind CostKind) = 0;
+  virtual InstructionCost getCFInstrCost(unsigned Opcode,
+                                         TTI::TargetCostKind CostKind) = 0;
   virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                              Type *CondTy,
                                              CmpInst::Predicate VecPred,
@@ -2052,7 +2053,8 @@
                                   unsigned Index) override {
     return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
   }
-  int getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) override {
+  InstructionCost getCFInstrCost(unsigned Opcode,
+                                 TTI::TargetCostKind CostKind) override {
     return Impl.getCFInstrCost(Opcode, CostKind);
   }
   InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -513,7 +513,8 @@
     return 1;
   }
 
-  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) const {
+  InstructionCost getCFInstrCost(unsigned Opcode,
+                                 TTI::TargetCostKind CostKind) const {
    // A phi would be free, unless we're costing the throughput because it
    // will require a register.
    if (Opcode == Instruction::PHI && CostKind != TTI::TCK_RecipThroughput)
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -897,7 +897,8 @@
                                     TTI::CastContextHint::None,
                                     TTI::TCK_RecipThroughput);
   }
 
-  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
+  InstructionCost getCFInstrCost(unsigned Opcode,
+                                 TTI::TargetCostKind CostKind) {
     return BaseT::getCFInstrCost(Opcode, CostKind);
   }
 
@@ -1022,7 +1023,7 @@
     int PackingCost = getScalarizationOverhead(VT, Opcode != Instruction::Store,
                                                Opcode == Instruction::Store);
 
-    int ConditionalCost = 0;
+    InstructionCost ConditionalCost = 0;
     if (VariableMask) {
       // Compute the cost of conditionally executing the memory operations with
       // variable masks. This includes extracting the individual conditions, a
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -782,9 +782,10 @@
   return Cost;
 }
 
-int TargetTransformInfo::getCFInstrCost(unsigned Opcode,
-                                        TTI::TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getCFInstrCost(Opcode, CostKind);
+InstructionCost
+TargetTransformInfo::getCFInstrCost(unsigned Opcode,
+                                    TTI::TargetCostKind CostKind) const {
+  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -141,7 +141,7 @@
   InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                            VectorType *VecTy, unsigned Index);
 
-  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
+  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
 
   int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -655,8 +655,8 @@
                                        CostKind);
 }
 
-unsigned AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
-                                        TTI::TargetCostKind CostKind) {
+InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
+                                               TTI::TargetCostKind CostKind) {
   if (CostKind != TTI::TCK_RecipThroughput)
     return Opcode == Instruction::PHI ? 0 : 1;
   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -163,7 +163,7 @@
       ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
       const Instruction *CxtI = nullptr);
 
-  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
+  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
 
   bool isInlineAsmSourceOfDivergence(const CallInst *CI,
                                      ArrayRef<unsigned> Indices = {}) const;
@@ -251,7 +251,7 @@
   bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                     unsigned AddrSpace) const;
   unsigned getMaxInterleaveFactor(unsigned VF);
-  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
+  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
   int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);
 };
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -808,8 +808,8 @@
   return LT.first * NElts * InstRate;
 }
 
-unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode,
-                                    TTI::TargetCostKind CostKind) {
+InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
+                                           TTI::TargetCostKind CostKind) {
   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
     return Opcode == Instruction::PHI ? 0 : 1;
@@ -1293,8 +1293,8 @@
   return 8;
 }
 
-unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode,
-                                     TTI::TargetCostKind CostKind) {
+InstructionCost R600TTIImpl::getCFInstrCost(unsigned Opcode,
+                                            TTI::TargetCostKind CostKind) {
   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
     return Opcode == Instruction::PHI ? 0 : 1;
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -198,8 +198,7 @@
 
   bool shouldExpandReduction(const IntrinsicInst *II) const { return false; }
 
-  int getCFInstrCost(unsigned Opcode,
-                     TTI::TargetCostKind CostKind);
+  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
 
   InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                    TTI::CastContextHint CCH,
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -379,7 +379,8 @@
   return getIntImmCost(Imm, Ty, CostKind);
 }
 
-int ARMTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
+InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
+                                           TTI::TargetCostKind CostKind) {
   if (CostKind == TTI::TCK_RecipThroughput &&
       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
     // FIXME: The vectorizer is highly sensistive to the cost of these
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
@@ -154,7 +154,8 @@
                                    const Instruction *I = nullptr);
   unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
 
-  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
+  InstructionCost getCFInstrCost(unsigned Opcode,
+                                 TTI::TargetCostKind CostKind) {
     return 1;
   }
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -114,7 +114,7 @@
                                    TTI::CastContextHint CCH,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I = nullptr);
-  int getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
+  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
   InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                      CmpInst::Predicate VecPred,
                                      TTI::TargetCostKind CostKind,
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -1002,7 +1002,8 @@
           .getValue();
 }
 
-int PPCTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
+InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode,
+                                           TTI::TargetCostKind CostKind) {
   if (CostKind != TTI::TCK_RecipThroughput)
     return Opcode == Instruction::PHI ? 0 : 1;
   // Branches are assumed to be predicted.
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -206,7 +206,7 @@
 
   int getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind);
 
-  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
+  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
 
   int getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty, TTI::TargetCostKind CostKind,
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -4086,8 +4086,8 @@
   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
 }
 
-unsigned
-X86TTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
+InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
+                                           TTI::TargetCostKind CostKind) {
   if (CostKind != TTI::TCK_RecipThroughput)
     return Opcode == Instruction::PHI ? 0 : 1;
   // Branches are assumed to be predicted.
@@ -4206,7 +4206,7 @@
     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
-    int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
+    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
     MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
   }
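
Note, not part of the patch above: the sketch below illustrates how a caller might consume the InstructionCost now returned by getCFInstrCost, assuming the standard llvm::InstructionCost interface (isValid(), arithmetic and comparison operators). The helper name isBranchCheap and the threshold of 1 are hypothetical, chosen only for illustration.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Hypothetical helper (illustration only): decide whether a branch is cheap
// enough for some transform, using the InstructionCost-returning interface.
static bool isBranchCheap(const TargetTransformInfo &TTI) {
  InstructionCost Cost = TTI.getCFInstrCost(
      Instruction::Br, TargetTransformInfo::TCK_RecipThroughput);
  // InstructionCost carries a validity state alongside the numeric value;
  // treat an invalid cost as "unknown" and stay conservative.
  if (!Cost.isValid())
    return false;
  // Comparisons against plain integers are defined for InstructionCost.
  return Cost <= 1;
}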