Index: llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
+++ llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
@@ -379,3 +379,22 @@
 
 foreach intr = AMDGPUImageDimAtomicIntrinsics in
 def : SourceOfDivergence<intr>;
+
+class AlwaysUniform<Intrinsic intr> {
+  Intrinsic Intr = intr;
+}
+
+def UniformIntrinsics : GenericTable {
+  let FilterClass = "AlwaysUniform";
+  let Fields = ["Intr"];
+
+  let PrimaryKey = ["Intr"];
+  let PrimaryKeyName = "lookupAlwaysUniform";
+}
+
+def : AlwaysUniform<int_amdgcn_readfirstlane>;
+def : AlwaysUniform<int_amdgcn_readlane>;
+def : AlwaysUniform<int_amdgcn_icmp>;
+def : AlwaysUniform<int_amdgcn_fcmp>;
+def : AlwaysUniform<int_amdgcn_ballot>;
+def : AlwaysUniform<int_amdgcn_if_break>;
Index: llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -928,19 +928,8 @@
 }
 
 bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
-  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
-    switch (Intrinsic->getIntrinsicID()) {
-    default:
-      return false;
-    case Intrinsic::amdgcn_readfirstlane:
-    case Intrinsic::amdgcn_readlane:
-    case Intrinsic::amdgcn_icmp:
-    case Intrinsic::amdgcn_fcmp:
-    case Intrinsic::amdgcn_ballot:
-    case Intrinsic::amdgcn_if_break:
-      return true;
-    }
-  }
+  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
+    return AMDGPU::isIntrinsicAlwaysUniform(Intrinsic->getIntrinsicID());
 
   if (const CallInst *CI = dyn_cast<CallInst>(V)) {
     if (CI->isInlineAsm())
Index: llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -8371,16 +8371,10 @@
     auto IID = static_cast<Intrinsic::ID>(MI.getIntrinsicID());
     if (AMDGPU::isIntrinsicSourceOfDivergence(IID))
       return InstructionUniformity::NeverUniform;
+    if (AMDGPU::isIntrinsicAlwaysUniform(IID))
+      return InstructionUniformity::AlwaysUniform;
 
-    // FIXME: Get a tablegen table for this.
     switch (IID) {
-    case Intrinsic::amdgcn_readfirstlane:
-    case Intrinsic::amdgcn_readlane:
-    case Intrinsic::amdgcn_icmp:
-    case Intrinsic::amdgcn_fcmp:
-    case Intrinsic::amdgcn_ballot:
-    case Intrinsic::amdgcn_if_break:
-      return InstructionUniformity::AlwaysUniform;
     case Intrinsic::amdgcn_if:
     case Intrinsic::amdgcn_else:
       // FIXME: Uniform if second result
Index: llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
===================================================================
--- llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -1294,6 +1294,9 @@
 /// \returns true if the intrinsic is divergent
 bool isIntrinsicSourceOfDivergence(unsigned IntrID);
 
+/// \returns true if the intrinsic is uniform
+bool isIntrinsicAlwaysUniform(unsigned IntrID);
+
 // Track defaults for fields in the MODE register.
 struct SIModeRegisterDefaults {
   /// Floating point opcodes that support exception flag gathering quiet and
Index: llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -2634,7 +2634,13 @@
 };
 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
 
+struct AlwaysUniform {
+  unsigned Intr;
+};
+const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
+
 #define GET_SourcesOfDivergence_IMPL
+#define GET_UniformIntrinsics_IMPL
 #define GET_Gfx9BufferFormat_IMPL
 #define GET_Gfx10BufferFormat_IMPL
 #define GET_Gfx11PlusBufferFormat_IMPL
@@ -2646,6 +2652,10 @@
   return lookupSourceOfDivergence(IntrID);
 }
 
+bool isIntrinsicAlwaysUniform(unsigned IntrID) {
+  return lookupAlwaysUniform(IntrID);
+}
+
 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
                                                   uint8_t NumComponents,
                                                   uint8_t NumFormat,
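Note: with GET_UniformIntrinsics_IMPL defined, the SearchableTables backend emits a
sorted table plus a binary-search lookup for the "UniformIntrinsics" GenericTable,
which is what makes the lookupAlwaysUniform declaration above resolve. The sketch
below is only an approximation of the generated code's shape, not the actual
tblgen output; the table contents and exact search code are produced by llvm-tblgen
from AMDGPUSearchableTables.td.

// Illustrative approximation of what GET_UniformIntrinsics_IMPL expands to.
#include <algorithm>
#include <iterator>

struct AlwaysUniform {
  unsigned Intr;
};

// Entries are sorted by intrinsic ID so the lookup can binary-search.
static const AlwaysUniform UniformIntrinsics[] = {
    // {Intrinsic::amdgcn_ballot}, {Intrinsic::amdgcn_fcmp}, ... (generated)
};

const AlwaysUniform *lookupAlwaysUniform(unsigned Intr) {
  auto I = std::lower_bound(std::begin(UniformIntrinsics),
                            std::end(UniformIntrinsics), Intr,
                            [](const AlwaysUniform &E, unsigned Key) {
                              return E.Intr < Key;
                            });
  if (I == std::end(UniformIntrinsics) || I->Intr != Intr)
    return nullptr;
  // A non-null result is what isIntrinsicAlwaysUniform() converts to true.
  return I;
}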