Changeset View
Changeset View
Standalone View
Standalone View
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
Show All 11 Lines | |||||
/// provide more precise answers to certain TTI queries, while letting the | /// provide more precise answers to certain TTI queries, while letting the | ||||
/// target independent and default TTI implementations handle the rest. | /// target independent and default TTI implementations handle the rest. | ||||
// | // | ||||
//===----------------------------------------------------------------------===// | //===----------------------------------------------------------------------===// | ||||
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H | #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H | ||||
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H | #define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H | ||||
#include "AMDGPU.h" | |||||
#include "llvm/CodeGen/BasicTTIImpl.h" | #include "llvm/CodeGen/BasicTTIImpl.h" | ||||
namespace llvm { | namespace llvm { | ||||
class AMDGPUTargetMachine; | class AMDGPUTargetMachine; | ||||
class GCNSubtarget; | class GCNSubtarget; | ||||
class InstCombiner; | class InstCombiner; | ||||
class Loop; | class Loop; | ||||
▲ Show 20 Lines • Show All 138 Lines • ▼ Show 20 Lines | public: | ||||
/// Analyze whether the inline asm call \p CI produces divergent results.
/// \p Indices selects which asm results to check; an empty list means check
/// all of them. (Presumably used by divergence analysis — the definition is
/// out-of-line, so exact criteria are not visible here.)
bool isInlineAsmSourceOfDivergence(const CallInst *CI,
                                   ArrayRef<unsigned> Indices = {}) const;

/// TTI hook: cost of a vector insert/extract instruction with opcode
/// \p Opcode on vector type \p ValTy at element \p Index.
InstructionCost getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index);

/// TTI hook: whether \p V may yield different values across lanes of a wave.
bool isSourceOfDivergence(const Value *V) const;

/// TTI hook: whether \p V is guaranteed uniform across all lanes of a wave.
bool isAlwaysUniform(const Value *V) const;
unsigned getFlatAddressSpace() const { | unsigned getFlatAddressSpace() const; | ||||
// Don't bother running InferAddressSpaces pass on graphics shaders which | |||||
// don't use flat addressing. | |||||
if (IsGraphics) | |||||
return -1; | |||||
return AMDGPUAS::FLAT_ADDRESS; | |||||
} | |||||
/// TTI hook: record in \p OpIndexes the operand indices of intrinsic \p IID
/// that carry flat addresses InferAddressSpaces may rewrite. Returns true if
/// the intrinsic has such operands.
bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                Intrinsic::ID IID) const;

/// TTI hook: rewrite intrinsic \p II, replacing pointer operand \p OldV with
/// \p NewV (which has a different address space). Returns the rewritten
/// value, or nullptr if the rewrite is not possible.
Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                        Value *NewV) const;

/// Whether a "legacy" AMDGPU multiply of \p Op0 and \p Op1 can be simplified
/// to a plain fmul — NOTE(review): exact legality conditions live in the
/// out-of-line definition; confirm there.
bool canSimplifyLegacyMulToMul(const Value *Op0, const Value *Op1,
                               InstCombiner &IC) const;
Show All 36 Lines |