diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -684,9 +684,10 @@ /// If the AM is supported, the return value must be >= 0. /// If the AM is not supported, it returns a negative value. /// TODO: Handle pre/postinc as well. - int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, - bool HasBaseReg, int64_t Scale, - unsigned AddrSpace = 0) const; + InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, + int64_t BaseOffset, bool HasBaseReg, + int64_t Scale, + unsigned AddrSpace = 0) const; /// Return true if the loop strength reduce pass should make /// Instruction* based TTI queries to isLegalAddressingMode(). This is @@ -1483,9 +1484,10 @@ virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0; virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0; virtual bool prefersVectorizedAddressing() = 0; - virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, - int64_t BaseOffset, bool HasBaseReg, - int64_t Scale, unsigned AddrSpace) = 0; + virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, + int64_t BaseOffset, + bool HasBaseReg, int64_t Scale, + unsigned AddrSpace) = 0; virtual bool LSRWithInstrQueries() = 0; virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0; virtual bool isProfitableToHoist(Instruction *I) = 0; @@ -1864,9 +1866,10 @@ bool prefersVectorizedAddressing() override { return Impl.prefersVectorizedAddressing(); } - int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, - bool HasBaseReg, int64_t Scale, - unsigned AddrSpace) override { + InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, + int64_t BaseOffset, bool HasBaseReg, + int64_t Scale, + unsigned AddrSpace) override { return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale, 
AddrSpace); } diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h --- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ -270,9 +270,10 @@ bool prefersVectorizedAddressing() const { return true; } - int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, - bool HasBaseReg, int64_t Scale, - unsigned AddrSpace) const { + InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, + int64_t BaseOffset, bool HasBaseReg, + int64_t Scale, + unsigned AddrSpace) const { // Guess that all legal addressing mode are free. if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace)) diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -281,8 +281,9 @@ return TargetTransformInfoImplBase::isProfitableLSRChainElement(I); } - int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, - bool HasBaseReg, int64_t Scale, unsigned AddrSpace) { + InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, + int64_t BaseOffset, bool HasBaseReg, + int64_t Scale, unsigned AddrSpace) { TargetLoweringBase::AddrMode AM; AM.BaseGV = BaseGV; AM.BaseOffs = BaseOffset; diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -49,6 +49,7 @@ #include "llvm/Support/AtomicOrdering.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/InstructionCost.h" #include "llvm/Support/MachineValueType.h" #include <algorithm> #include <cassert> @@ -2347,8 +2348,9 @@ /// If the AM is not supported, it returns a negative value. /// TODO: Handle pre/postinc as well. 
/// TODO: Remove default argument - virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, - Type *Ty, unsigned AS = 0) const { + virtual InstructionCost getScalingFactorCost(const DataLayout &DL, + const AddrMode &AM, Type *Ty, + unsigned AS = 0) const { // Default: assume that any scaling factor used in a legal AM is free. if (isLegalAddressingMode(DL, AM, Ty, AS)) return 0; diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -423,12 +423,11 @@ return TTIImpl->prefersVectorizedAddressing(); } -int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, - int64_t BaseOffset, - bool HasBaseReg, int64_t Scale, - unsigned AddrSpace) const { - int Cost = TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, - Scale, AddrSpace); +InstructionCost TargetTransformInfo::getScalingFactorCost( + Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, + int64_t Scale, unsigned AddrSpace) const { + InstructionCost Cost = TTIImpl->getScalingFactorCost( + Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace); assert(Cost >= 0 && "TTI should not produce negative costs!"); return Cost; } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -609,8 +609,8 @@ /// of the specified type. /// If the AM is supported, the return value must be >= 0. /// If the AM is not supported, it returns a negative value. - int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, - unsigned AS) const override; + InstructionCost getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, + Type *Ty, unsigned AS) const override; /// Return true if an FMA operation is faster than a pair of fmul and fadd /// instructions. 
fmuladd intrinsics will be expanded to FMAs when this method diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -11724,9 +11724,8 @@ return true; } -int AArch64TargetLowering::getScalingFactorCost(const DataLayout &DL, - const AddrMode &AM, Type *Ty, - unsigned AS) const { +InstructionCost AArch64TargetLowering::getScalingFactorCost( + const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const { // Scaling factors are not free at all. // Operands | Rt Latency // ------------------------------------------- diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -443,8 +443,9 @@ /// addressing mode represented by AM. /// If the AM is supported, the return value must be >= 0. /// If the AM is not supported, the return value must be negative. - int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, - unsigned AS) const override; + InstructionCost getScalingFactorCost(const DataLayout &DL, + const AddrMode &AM, Type *Ty, + unsigned AS) const override; bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -17079,9 +17079,10 @@ return true; } -int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, - const AddrMode &AM, Type *Ty, - unsigned AS) const { +InstructionCost ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, + const AddrMode &AM, + Type *Ty, + unsigned AS) const { if (isLegalAddressingMode(DL, AM, Ty, AS)) { if (Subtarget->hasFPAO()) return AM.Scale < 0 ? 
1 : 0; // positive offsets execute faster diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -1166,8 +1166,9 @@ /// of the specified type. /// If the AM is supported, the return value must be >= 0. /// If the AM is not supported, it returns a negative value. - int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, - unsigned AS) const override; + InstructionCost getScalingFactorCost(const DataLayout &DL, + const AddrMode &AM, Type *Ty, + unsigned AS) const override; /// This is used to enable splatted operand transforms for vector shifts /// and vector funnel shifts. diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -51911,9 +51911,10 @@ return Res; } -int X86TargetLowering::getScalingFactorCost(const DataLayout &DL, - const AddrMode &AM, Type *Ty, - unsigned AS) const { +InstructionCost X86TargetLowering::getScalingFactorCost(const DataLayout &DL, + const AddrMode &AM, + Type *Ty, + unsigned AS) const { // Scaling factors are not free at all. // An indexed folded instruction, i.e., inst (reg1, reg2, scale), // will take 2 allocations in the out of order engine instead of 1 diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -1009,9 +1009,9 @@ const LSRUse &LU, const Formula &F); // Get the cost of the scaling factor used in F for LU. 
-static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, - const LSRUse &LU, const Formula &F, - const Loop &L); +static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F, + const Loop &L); namespace { @@ -1360,7 +1360,7 @@ C.NumBaseAdds += (F.UnfoldedOffset != 0); // Accumulate non-free scaling amounts. - C.ScaleCost += getScalingFactorCost(*TTI, LU, F, *L); + C.ScaleCost += *getScalingFactorCost(*TTI, LU, F, *L).getValue(); // Tally up the non-zero immediates. for (const LSRFixup &Fixup : LU.Fixups) { @@ -1757,9 +1757,9 @@ F.Scale); } -static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, - const LSRUse &LU, const Formula &F, - const Loop &L) { +static InstructionCost getScalingFactorCost(const TargetTransformInfo &TTI, + const LSRUse &LU, const Formula &F, + const Loop &L) { if (!F.Scale) return 0; @@ -1772,14 +1772,15 @@ switch (LU.Kind) { case LSRUse::Address: { // Check the scaling factor cost with both the min and max offsets. - int ScaleCostMinOffset = TTI.getScalingFactorCost( + InstructionCost ScaleCostMinOffset = TTI.getScalingFactorCost( LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg, F.Scale, LU.AccessTy.AddrSpace); - int ScaleCostMaxOffset = TTI.getScalingFactorCost( + InstructionCost ScaleCostMaxOffset = TTI.getScalingFactorCost( LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg, F.Scale, LU.AccessTy.AddrSpace); assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 && + ScaleCostMinOffset.isValid() && ScaleCostMaxOffset.isValid() && "Legal addressing mode has an illegal cost!"); return std::max(ScaleCostMinOffset, ScaleCostMaxOffset); }