Index: llvm/lib/Target/SPIRV/CMakeLists.txt
===================================================================
--- llvm/lib/Target/SPIRV/CMakeLists.txt
+++ llvm/lib/Target/SPIRV/CMakeLists.txt
@@ -14,12 +14,12 @@
 add_public_tablegen_target(SPIRVCommonTableGen)
 
 add_llvm_target(SPIRVCodeGen
+  Registries/SPIRVGlobalObjectRegistry.cpp
   SPIRVAsmPrinter.cpp
   SPIRVBuiltins.cpp
   SPIRVCallLowering.cpp
   SPIRVDuplicatesTracker.cpp
   SPIRVEmitIntrinsics.cpp
-  SPIRVGlobalRegistry.cpp
   SPIRVInstrInfo.cpp
   SPIRVInstructionSelector.cpp
   SPIRVISelLowering.cpp
Index: llvm/lib/Target/SPIRV/Registries/SPIRVGlobalObjectRegistry.h
===================================================================
--- llvm/lib/Target/SPIRV/Registries/SPIRVGlobalObjectRegistry.h
+++ llvm/lib/Target/SPIRV/Registries/SPIRVGlobalObjectRegistry.h
@@ -1,20 +1,21 @@
-//===-- SPIRVGlobalRegistry.h - SPIR-V Global Registry ----------*- C++ -*-===//
+//===-- SPIRVGlobalObjectRegistry.h - SPIR-V Global Object Registry -*- C++
+//-*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
-//===----------------------------------------------------------------------===//
+//===--------------------------------------------------------------------------===//
 //
-// SPIRVGlobalRegistry is used to maintain rich type information required for
-// SPIR-V even after lowering from LLVM IR to GMIR. It can convert an llvm::Type
-// into an OpTypeXXX instruction, and map it to a virtual register. Also it
-// builds and supports consistency of constants and global variables.
+// SPIRVGlobalObjectRegistry is used to maintain rich type information required
+// for SPIR-V even after lowering from LLVM IR to GMIR. It can convert an
+// llvm::Type into an OpTypeXXX instruction, and map it to a virtual register.
+// Also it builds and supports consistency of constants and global variables.
 //
-//===----------------------------------------------------------------------===//
+//===--------------------------------------------------------------------------===//
 
-#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVTYPEMANAGER_H
-#define LLVM_LIB_TARGET_SPIRV_SPIRVTYPEMANAGER_H
+#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVGLOBALOBJECTREGISTRY_H
+#define LLVM_LIB_TARGET_SPIRV_SPIRVGLOBALOBJECTREGISTRY_H
 
 #include "MCTargetDesc/SPIRVBaseInfo.h"
 #include "SPIRVDuplicatesTracker.h"
@@ -24,7 +25,7 @@
 namespace llvm {
 using SPIRVType = const MachineInstr;
 
-class SPIRVGlobalRegistry {
+class SPIRVGlobalObjectRegistry {
   // Registers holding values which have types associated with them.
   // Initialized upon VReg definition in IRTranslator.
   // Do not confuse this with DuplicatesTracker as DT maps Type* to
@@ -64,35 +65,36 @@
                                    bool EmitIR);
 
 public:
-  SPIRVGlobalRegistry(unsigned PointerSize);
+  SPIRVGlobalObjectRegistry(unsigned PointerSize);
 
   MachineFunction *CurMF;
 
-  void add(const Constant *C, MachineFunction *MF, Register R) {
+  void add(const Constant *C, const MachineFunction *MF, const Register R) {
     DT.add(C, MF, R);
   }
 
-  void add(const GlobalVariable *GV, MachineFunction *MF, Register R) {
+  void add(const GlobalVariable *GV, const MachineFunction *MF,
+           const Register R) {
     DT.add(GV, MF, R);
   }
 
-  void add(const Function *F, MachineFunction *MF, Register R) {
+  void add(const Function *F, const MachineFunction *MF, const Register R) {
     DT.add(F, MF, R);
   }
 
-  void add(const Argument *Arg, MachineFunction *MF, Register R) {
+  void add(const Argument *Arg, const MachineFunction *MF, const Register R) {
     DT.add(Arg, MF, R);
   }
 
-  Register find(const Constant *C, MachineFunction *MF) {
+  Register find(const Constant *C, const MachineFunction *MF) {
     return DT.find(C, MF);
   }
 
-  Register find(const GlobalVariable *GV, MachineFunction *MF) {
+  Register find(const GlobalVariable *GV, const MachineFunction *MF) {
     return DT.find(GV, MF);
   }
 
-  Register find(const Function *F, MachineFunction *MF) {
+  Register find(const Function *F, const MachineFunction *MF) {
     return DT.find(F, MF);
   }
 
@@ -312,4 +314,4 @@
                                        unsigned Opcode);
 };
 } // end namespace llvm
-#endif // LLLVM_LIB_TARGET_SPIRV_SPIRVTYPEMANAGER_H
+#endif // LLVM_LIB_TARGET_SPIRV_SPIRVGLOBALOBJECTREGISTRY_H
Index: llvm/lib/Target/SPIRV/Registries/SPIRVGlobalObjectRegistry.cpp
===================================================================
--- llvm/lib/Target/SPIRV/Registries/SPIRVGlobalObjectRegistry.cpp
+++ llvm/lib/Target/SPIRV/Registries/SPIRVGlobalObjectRegistry.cpp
@@ -1,20 +1,21 @@
-//===-- SPIRVGlobalRegistry.cpp - SPIR-V Global Registry --------*- C++ -*-===//
+//===-- SPIRVGlobalObjectRegistry.cpp - SPIR-V Global Object Registry --*- C++
+//-*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
-//===----------------------------------------------------------------------===//
+//===-----------------------------------------------------------------------------===//
 //
-// This file contains the implementation of the SPIRVGlobalRegistry class,
+// This file contains the implementation of the SPIRVGlobalObjectRegistry class,
 // which is used to maintain rich type information required for SPIR-V even
-// after lowering from LLVM IR to GMIR. It can convert an llvm::Type into
-// an OpTypeXXX instruction, and map it to a virtual register. Also it builds
-// and supports consistency of constants and global variables.
+// after lowering from LLVM IR to GMIR. It can convert an llvm::Type into an
+// OpTypeXXX instruction, and map it to a virtual register. Also it builds and
+// supports consistency of constants and global variables.
// -//===----------------------------------------------------------------------===// +//===-----------------------------------------------------------------------------===// -#include "SPIRVGlobalRegistry.h" +#include "SPIRVGlobalObjectRegistry.h" #include "SPIRV.h" #include "SPIRVBuiltins.h" #include "SPIRVSubtarget.h" @@ -22,19 +23,20 @@ #include "SPIRVUtils.h" using namespace llvm; -SPIRVGlobalRegistry::SPIRVGlobalRegistry(unsigned PointerSize) + +SPIRVGlobalObjectRegistry::SPIRVGlobalObjectRegistry(unsigned PointerSize) : PointerSize(PointerSize) {} -SPIRVType *SPIRVGlobalRegistry::assignIntTypeToVReg(unsigned BitWidth, - Register VReg, - MachineInstr &I, - const SPIRVInstrInfo &TII) { +SPIRVType * +SPIRVGlobalObjectRegistry::assignIntTypeToVReg(unsigned BitWidth, Register VReg, + MachineInstr &I, + const SPIRVInstrInfo &TII) { SPIRVType *SpirvType = getOrCreateSPIRVIntegerType(BitWidth, I, TII); assignSPIRVTypeToVReg(SpirvType, VReg, *CurMF); return SpirvType; } -SPIRVType *SPIRVGlobalRegistry::assignVectTypeToVReg( +SPIRVType *SPIRVGlobalObjectRegistry::assignVectTypeToVReg( SPIRVType *BaseType, unsigned NumElements, Register VReg, MachineInstr &I, const SPIRVInstrInfo &TII) { SPIRVType *SpirvType = @@ -43,7 +45,7 @@ return SpirvType; } -SPIRVType *SPIRVGlobalRegistry::assignTypeToVReg( +SPIRVType *SPIRVGlobalObjectRegistry::assignTypeToVReg( const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) { @@ -53,9 +55,9 @@ return SpirvType; } -void SPIRVGlobalRegistry::assignSPIRVTypeToVReg(SPIRVType *SpirvType, - Register VReg, - MachineFunction &MF) { +void SPIRVGlobalObjectRegistry::assignSPIRVTypeToVReg(SPIRVType *SpirvType, + Register VReg, + MachineFunction &MF) { VRegToTypeMap[&MF][VReg] = SpirvType; } @@ -72,14 +74,15 @@ return Res; } -SPIRVType *SPIRVGlobalRegistry::getOpTypeBool(MachineIRBuilder &MIRBuilder) { +SPIRVType * +SPIRVGlobalObjectRegistry::getOpTypeBool(MachineIRBuilder &MIRBuilder) { return MIRBuilder.buildInstr(SPIRV::OpTypeBool) .addDef(createTypeVReg(MIRBuilder)); } -SPIRVType *SPIRVGlobalRegistry::getOpTypeInt(uint32_t Width, - MachineIRBuilder &MIRBuilder, - bool IsSigned) { +SPIRVType *SPIRVGlobalObjectRegistry::getOpTypeInt(uint32_t Width, + MachineIRBuilder &MIRBuilder, + bool IsSigned) { assert(Width <= 64 && "Unsupported integer width!"); if (Width <= 8) Width = 8; @@ -97,22 +100,23 @@ return MIB; } -SPIRVType *SPIRVGlobalRegistry::getOpTypeFloat(uint32_t Width, - MachineIRBuilder &MIRBuilder) { +SPIRVType * +SPIRVGlobalObjectRegistry::getOpTypeFloat(uint32_t Width, + MachineIRBuilder &MIRBuilder) { auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeFloat) .addDef(createTypeVReg(MIRBuilder)) .addImm(Width); return MIB; } -SPIRVType *SPIRVGlobalRegistry::getOpTypeVoid(MachineIRBuilder &MIRBuilder) { +SPIRVType * +SPIRVGlobalObjectRegistry::getOpTypeVoid(MachineIRBuilder &MIRBuilder) { return MIRBuilder.buildInstr(SPIRV::OpTypeVoid) .addDef(createTypeVReg(MIRBuilder)); } -SPIRVType *SPIRVGlobalRegistry::getOpTypeVector(uint32_t NumElems, - SPIRVType *ElemType, - MachineIRBuilder &MIRBuilder) { +SPIRVType *SPIRVGlobalObjectRegistry::getOpTypeVector( + uint32_t NumElems, SPIRVType *ElemType, MachineIRBuilder &MIRBuilder) { auto EleOpc = ElemType->getOpcode(); assert((EleOpc == SPIRV::OpTypeInt || EleOpc == SPIRV::OpTypeFloat || EleOpc == SPIRV::OpTypeBool) && @@ -126,10 +130,11 @@ } std::tuple -SPIRVGlobalRegistry::getOrCreateConstIntReg(uint64_t Val, SPIRVType *SpvType, - MachineIRBuilder 
*MIRBuilder, - MachineInstr *I, - const SPIRVInstrInfo *TII) { +SPIRVGlobalObjectRegistry::getOrCreateConstIntReg(uint64_t Val, + SPIRVType *SpvType, + MachineIRBuilder *MIRBuilder, + MachineInstr *I, + const SPIRVInstrInfo *TII) { const IntegerType *LLVMIntTy; if (SpvType) LLVMIntTy = cast(getTypeForSPIRVType(SpvType)); @@ -154,9 +159,10 @@ return std::make_tuple(Res, CI, NewInstr); } -Register SPIRVGlobalRegistry::getOrCreateConstInt(uint64_t Val, MachineInstr &I, - SPIRVType *SpvType, - const SPIRVInstrInfo &TII) { +Register +SPIRVGlobalObjectRegistry::getOrCreateConstInt(uint64_t Val, MachineInstr &I, + SPIRVType *SpvType, + const SPIRVInstrInfo &TII) { assert(SpvType); ConstantInt *CI; Register Res; @@ -185,10 +191,10 @@ return Res; } -Register SPIRVGlobalRegistry::buildConstantInt(uint64_t Val, - MachineIRBuilder &MIRBuilder, - SPIRVType *SpvType, - bool EmitIR) { +Register +SPIRVGlobalObjectRegistry::buildConstantInt(uint64_t Val, + MachineIRBuilder &MIRBuilder, + SPIRVType *SpvType, bool EmitIR) { auto &MF = MIRBuilder.getMF(); const IntegerType *LLVMIntTy; if (SpvType) @@ -232,9 +238,8 @@ return Res; } -Register SPIRVGlobalRegistry::buildConstantFP(APFloat Val, - MachineIRBuilder &MIRBuilder, - SPIRVType *SpvType) { +Register SPIRVGlobalObjectRegistry::buildConstantFP( + APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType) { auto &MF = MIRBuilder.getMF(); const Type *LLVMFPTy; if (SpvType) { @@ -257,7 +262,7 @@ return Res; } -Register SPIRVGlobalRegistry::getOrCreateIntCompositeOrNull( +Register SPIRVGlobalObjectRegistry::getOrCreateIntCompositeOrNull( uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, Constant *CA, unsigned BitWidth, unsigned ElemCnt) { @@ -300,10 +305,9 @@ return Res; } -Register -SPIRVGlobalRegistry::getOrCreateConsIntVector(uint64_t Val, MachineInstr &I, - SPIRVType *SpvType, - const SPIRVInstrInfo &TII) { +Register SPIRVGlobalObjectRegistry::getOrCreateConsIntVector( + uint64_t Val, MachineInstr &I, SPIRVType *SpvType, + const SPIRVInstrInfo &TII) { const Type *LLVMTy = getTypeForSPIRVType(SpvType); assert(LLVMTy->isVectorTy()); const FixedVectorType *LLVMVecTy = cast(LLVMTy); @@ -316,10 +320,9 @@ SpvType->getOperand(2).getImm()); } -Register -SPIRVGlobalRegistry::getOrCreateConsIntArray(uint64_t Val, MachineInstr &I, - SPIRVType *SpvType, - const SPIRVInstrInfo &TII) { +Register SPIRVGlobalObjectRegistry::getOrCreateConsIntArray( + uint64_t Val, MachineInstr &I, SPIRVType *SpvType, + const SPIRVInstrInfo &TII) { const Type *LLVMTy = getTypeForSPIRVType(SpvType); assert(LLVMTy->isArrayTy()); const ArrayType *LLVMArrTy = cast(LLVMTy); @@ -333,7 +336,7 @@ LLVMArrTy->getNumElements()); } -Register SPIRVGlobalRegistry::getOrCreateIntCompositeOrNull( +Register SPIRVGlobalObjectRegistry::getOrCreateIntCompositeOrNull( uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR, Constant *CA, unsigned BitWidth, unsigned ElemCnt) { Register Res = DT.find(CA, CurMF); @@ -370,10 +373,9 @@ return Res; } -Register -SPIRVGlobalRegistry::getOrCreateConsIntVector(uint64_t Val, - MachineIRBuilder &MIRBuilder, - SPIRVType *SpvType, bool EmitIR) { +Register SPIRVGlobalObjectRegistry::getOrCreateConsIntVector( + uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, + bool EmitIR) { const Type *LLVMTy = getTypeForSPIRVType(SpvType); assert(LLVMTy->isVectorTy()); const FixedVectorType *LLVMVecTy = cast(LLVMTy); @@ -387,10 +389,9 @@ SpvType->getOperand(2).getImm()); } -Register 
-SPIRVGlobalRegistry::getOrCreateConsIntArray(uint64_t Val, - MachineIRBuilder &MIRBuilder, - SPIRVType *SpvType, bool EmitIR) { +Register SPIRVGlobalObjectRegistry::getOrCreateConsIntArray( + uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, + bool EmitIR) { const Type *LLVMTy = getTypeForSPIRVType(SpvType); assert(LLVMTy->isArrayTy()); const ArrayType *LLVMArrTy = cast(LLVMTy); @@ -406,8 +407,8 @@ } Register -SPIRVGlobalRegistry::getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, - SPIRVType *SpvType) { +SPIRVGlobalObjectRegistry::getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, + SPIRVType *SpvType) { const Type *LLVMTy = getTypeForSPIRVType(SpvType); const PointerType *LLVMPtrTy = cast(LLVMTy); // Find a constant in DT or build a new one. @@ -426,7 +427,7 @@ return Res; } -Register SPIRVGlobalRegistry::buildConstantSampler( +Register SPIRVGlobalObjectRegistry::buildConstantSampler( Register ResReg, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType) { SPIRVType *SampTy; @@ -449,7 +450,7 @@ return Res->getOperand(0).getReg(); } -Register SPIRVGlobalRegistry::buildGlobalVariable( +Register SPIRVGlobalObjectRegistry::buildGlobalVariable( Register ResVReg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, @@ -533,10 +534,9 @@ return Reg; } -SPIRVType *SPIRVGlobalRegistry::getOpTypeArray(uint32_t NumElems, - SPIRVType *ElemType, - MachineIRBuilder &MIRBuilder, - bool EmitIR) { +SPIRVType *SPIRVGlobalObjectRegistry::getOpTypeArray( + uint32_t NumElems, SPIRVType *ElemType, MachineIRBuilder &MIRBuilder, + bool EmitIR) { assert((ElemType->getOpcode() != SPIRV::OpTypeVoid) && "Invalid array element type"); Register NumElementsVReg = @@ -548,8 +548,9 @@ return MIB; } -SPIRVType *SPIRVGlobalRegistry::getOpTypeOpaque(const StructType *Ty, - MachineIRBuilder &MIRBuilder) { +SPIRVType * +SPIRVGlobalObjectRegistry::getOpTypeOpaque(const StructType *Ty, + MachineIRBuilder &MIRBuilder) { assert(Ty->hasName()); const StringRef Name = Ty->hasName() ? 
Ty->getName() : ""; Register ResVReg = createTypeVReg(MIRBuilder); @@ -559,9 +560,8 @@ return MIB; } -SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct(const StructType *Ty, - MachineIRBuilder &MIRBuilder, - bool EmitIR) { +SPIRVType *SPIRVGlobalObjectRegistry::getOpTypeStruct( + const StructType *Ty, MachineIRBuilder &MIRBuilder, bool EmitIR) { SmallVector FieldTypes; for (const auto &Elem : Ty->elements()) { SPIRVType *ElemTy = findSPIRVType(Elem, MIRBuilder); @@ -580,7 +580,7 @@ return MIB; } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSpecialType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSpecialType( const Type *Ty, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual) { // Some OpenCL and SPIRV builtins like image2d_t are passed in as @@ -593,7 +593,7 @@ return SPIRV::lowerBuiltinType(Ty, AccQual, MIRBuilder, this); } -SPIRVType *SPIRVGlobalRegistry::getOpTypePointer( +SPIRVType *SPIRVGlobalObjectRegistry::getOpTypePointer( SPIRV::StorageClass::StorageClass SC, SPIRVType *ElemType, MachineIRBuilder &MIRBuilder, Register Reg) { if (!Reg.isValid()) @@ -604,14 +604,14 @@ .addUse(getSPIRVTypeID(ElemType)); } -SPIRVType *SPIRVGlobalRegistry::getOpTypeForwardPointer( +SPIRVType *SPIRVGlobalObjectRegistry::getOpTypeForwardPointer( SPIRV::StorageClass::StorageClass SC, MachineIRBuilder &MIRBuilder) { return MIRBuilder.buildInstr(SPIRV::OpTypeForwardPointer) .addUse(createTypeVReg(MIRBuilder)) .addImm(static_cast(SC)); } -SPIRVType *SPIRVGlobalRegistry::getOpTypeFunction( +SPIRVType *SPIRVGlobalObjectRegistry::getOpTypeFunction( SPIRVType *RetType, const SmallVectorImpl &ArgTypes, MachineIRBuilder &MIRBuilder) { auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeFunction) @@ -622,7 +622,7 @@ return MIB; } -SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeFunctionWithArgs( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateOpTypeFunctionWithArgs( const Type *Ty, SPIRVType *RetType, const SmallVectorImpl &ArgTypes, MachineIRBuilder &MIRBuilder) { @@ -633,7 +633,7 @@ return finishCreatingSPIRVType(Ty, SpirvType); } -SPIRVType *SPIRVGlobalRegistry::findSPIRVType( +SPIRVType *SPIRVGlobalObjectRegistry::findSPIRVType( const Type *Ty, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual, bool EmitIR) { Register Reg = DT.find(Ty, &MIRBuilder.getMF()); @@ -644,14 +644,15 @@ return restOfCreateSPIRVType(Ty, MIRBuilder, AccQual, EmitIR); } -Register SPIRVGlobalRegistry::getSPIRVTypeID(const SPIRVType *SpirvType) const { +Register +SPIRVGlobalObjectRegistry::getSPIRVTypeID(const SPIRVType *SpirvType) const { assert(SpirvType && "Attempting to get type id for nullptr type."); if (SpirvType->getOpcode() == SPIRV::OpTypeForwardPointer) return SpirvType->uses().begin()->getReg(); return SpirvType->defs().begin()->getReg(); } -SPIRVType *SPIRVGlobalRegistry::createSPIRVType( +SPIRVType *SPIRVGlobalObjectRegistry::createSPIRVType( const Type *Ty, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual, bool EmitIR) { if (isSpecialOpaqueType(Ty)) @@ -726,7 +727,7 @@ llvm_unreachable("Unable to convert LLVM type to SPIRVType"); } -SPIRVType *SPIRVGlobalRegistry::restOfCreateSPIRVType( +SPIRVType *SPIRVGlobalObjectRegistry::restOfCreateSPIRVType( const Type *Ty, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) { if (TypesInProcessing.count(Ty) && !Ty->isPointerTy()) @@ -746,7 +747,7 @@ return SpirvType; } -SPIRVType *SPIRVGlobalRegistry::getSPIRVTypeForVReg(Register VReg) const { +SPIRVType 
*SPIRVGlobalObjectRegistry::getSPIRVTypeForVReg(Register VReg) const { auto t = VRegToTypeMap.find(CurMF); if (t != VRegToTypeMap.end()) { auto tt = t->second.find(VReg); @@ -756,7 +757,7 @@ return nullptr; } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVType( const Type *Ty, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) { Register Reg = DT.find(Ty, &MIRBuilder.getMF()); @@ -779,15 +780,15 @@ return STy; } -bool SPIRVGlobalRegistry::isScalarOfType(Register VReg, - unsigned TypeOpcode) const { +bool SPIRVGlobalObjectRegistry::isScalarOfType(Register VReg, + unsigned TypeOpcode) const { SPIRVType *Type = getSPIRVTypeForVReg(VReg); assert(Type && "isScalarOfType VReg has no type assigned"); return Type->getOpcode() == TypeOpcode; } -bool SPIRVGlobalRegistry::isScalarOrVectorOfType(Register VReg, - unsigned TypeOpcode) const { +bool SPIRVGlobalObjectRegistry::isScalarOrVectorOfType( + Register VReg, unsigned TypeOpcode) const { SPIRVType *Type = getSPIRVTypeForVReg(VReg); assert(Type && "isScalarOrVectorOfType VReg has no type assigned"); if (Type->getOpcode() == TypeOpcode) @@ -800,8 +801,8 @@ return false; } -unsigned -SPIRVGlobalRegistry::getScalarOrVectorBitWidth(const SPIRVType *Type) const { +unsigned SPIRVGlobalObjectRegistry::getScalarOrVectorBitWidth( + const SPIRVType *Type) const { assert(Type && "Invalid Type pointer"); if (Type->getOpcode() == SPIRV::OpTypeVector) { auto EleTypeReg = Type->getOperand(1).getReg(); @@ -815,7 +816,8 @@ llvm_unreachable("Attempting to get bit width of non-integer/float type."); } -bool SPIRVGlobalRegistry::isScalarOrVectorSigned(const SPIRVType *Type) const { +bool SPIRVGlobalObjectRegistry::isScalarOrVectorSigned( + const SPIRVType *Type) const { assert(Type && "Invalid Type pointer"); if (Type->getOpcode() == SPIRV::OpTypeVector) { auto EleTypeReg = Type->getOperand(1).getReg(); @@ -827,7 +829,7 @@ } SPIRV::StorageClass::StorageClass -SPIRVGlobalRegistry::getPointerStorageClass(Register VReg) const { +SPIRVGlobalObjectRegistry::getPointerStorageClass(Register VReg) const { SPIRVType *Type = getSPIRVTypeForVReg(VReg); assert(Type && Type->getOpcode() == SPIRV::OpTypePointer && Type->getOperand(1).isImm() && "Pointer type is expected"); @@ -835,7 +837,7 @@ Type->getOperand(1).getImm()); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeImage( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateOpTypeImage( MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, @@ -859,8 +861,8 @@ .addImm(AccessQual); } -SPIRVType * -SPIRVGlobalRegistry::getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder) { +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateOpTypeSampler( + MachineIRBuilder &MIRBuilder) { SPIRV::SamplerTypeDescriptor TD; if (auto *Res = checkSpecialInstr(TD, MIRBuilder)) return Res; @@ -869,7 +871,7 @@ return MIRBuilder.buildInstr(SPIRV::OpTypeSampler).addDef(ResVReg); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypePipe( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateOpTypePipe( MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccessQual) { SPIRV::PipeTypeDescriptor TD(AccessQual); @@ -882,7 +884,7 @@ .addImm(AccessQual); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeDeviceEvent( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateOpTypeDeviceEvent( MachineIRBuilder 
&MIRBuilder) { SPIRV::DeviceEventTypeDescriptor TD; if (auto *Res = checkSpecialInstr(TD, MIRBuilder)) @@ -892,7 +894,7 @@ return MIRBuilder.buildInstr(SPIRV::OpTypeDeviceEvent).addDef(ResVReg); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeSampledImage( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateOpTypeSampledImage( SPIRVType *ImageType, MachineIRBuilder &MIRBuilder) { SPIRV::SampledImageTypeDescriptor TD( SPIRVToLLVMType.lookup(MIRBuilder.getMF().getRegInfo().getVRegDef( @@ -907,7 +909,7 @@ .addUse(getSPIRVTypeID(ImageType)); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeByOpcode( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateOpTypeByOpcode( const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode) { Register ResVReg = DT.find(Ty, &MIRBuilder.getMF()); if (ResVReg.isValid()) @@ -917,9 +919,8 @@ return MIRBuilder.buildInstr(Opcode).addDef(ResVReg); } -const MachineInstr * -SPIRVGlobalRegistry::checkSpecialInstr(const SPIRV::SpecialTypeDescriptor &TD, - MachineIRBuilder &MIRBuilder) { +const MachineInstr *SPIRVGlobalObjectRegistry::checkSpecialInstr( + const SPIRV::SpecialTypeDescriptor &TD, MachineIRBuilder &MIRBuilder) { Register Reg = DT.find(TD, &MIRBuilder.getMF()); if (Reg.isValid()) return MIRBuilder.getMF().getRegInfo().getUniqueVRegDef(Reg); @@ -927,9 +928,8 @@ } // TODO: maybe use tablegen to implement this. -SPIRVType * -SPIRVGlobalRegistry::getOrCreateSPIRVTypeByName(StringRef TypeStr, - MachineIRBuilder &MIRBuilder) { +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVTypeByName( + StringRef TypeStr, MachineIRBuilder &MIRBuilder) { unsigned VecElts = 0; auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); @@ -964,16 +964,16 @@ return SpirvTy; } -SPIRVType * -SPIRVGlobalRegistry::getOrCreateSPIRVIntegerType(unsigned BitWidth, - MachineIRBuilder &MIRBuilder) { +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVIntegerType( + unsigned BitWidth, MachineIRBuilder &MIRBuilder) { return getOrCreateSPIRVType( IntegerType::get(MIRBuilder.getMF().getFunction().getContext(), BitWidth), MIRBuilder); } -SPIRVType *SPIRVGlobalRegistry::finishCreatingSPIRVType(const Type *LLVMTy, - SPIRVType *SpirvType) { +SPIRVType * +SPIRVGlobalObjectRegistry::finishCreatingSPIRVType(const Type *LLVMTy, + SPIRVType *SpirvType) { assert(CurMF == SpirvType->getMF()); VRegToTypeMap[CurMF][getSPIRVTypeID(SpirvType)] = SpirvType; SPIRVToLLVMType[SpirvType] = LLVMTy; @@ -981,7 +981,7 @@ return SpirvType; } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVIntegerType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVIntegerType( unsigned BitWidth, MachineInstr &I, const SPIRVInstrInfo &TII) { Type *LLVMTy = IntegerType::get(CurMF->getFunction().getContext(), BitWidth); Register Reg = DT.find(LLVMTy, CurMF); @@ -995,16 +995,16 @@ return finishCreatingSPIRVType(LLVMTy, MIB); } -SPIRVType * -SPIRVGlobalRegistry::getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder) { +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVBoolType( + MachineIRBuilder &MIRBuilder) { return getOrCreateSPIRVType( IntegerType::get(MIRBuilder.getMF().getFunction().getContext(), 1), MIRBuilder); } SPIRVType * -SPIRVGlobalRegistry::getOrCreateSPIRVBoolType(MachineInstr &I, - const SPIRVInstrInfo &TII) { +SPIRVGlobalObjectRegistry::getOrCreateSPIRVBoolType(MachineInstr &I, + const SPIRVInstrInfo &TII) { Type *LLVMTy = IntegerType::get(CurMF->getFunction().getContext(), 1); Register Reg = DT.find(LLVMTy, CurMF); if (Reg.isValid()) @@ -1015,7 +1015,7 @@ return finishCreatingSPIRVType(LLVMTy, 
MIB); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVVectorType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVVectorType( SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder) { return getOrCreateSPIRVType( FixedVectorType::get(const_cast(getTypeForSPIRVType(BaseType)), @@ -1023,7 +1023,7 @@ MIRBuilder); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVVectorType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVVectorType( SPIRVType *BaseType, unsigned NumElements, MachineInstr &I, const SPIRVInstrInfo &TII) { Type *LLVMTy = FixedVectorType::get( @@ -1039,7 +1039,7 @@ return finishCreatingSPIRVType(LLVMTy, MIB); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVArrayType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVArrayType( SPIRVType *BaseType, unsigned NumElements, MachineInstr &I, const SPIRVInstrInfo &TII) { Type *LLVMTy = ArrayType::get( @@ -1057,7 +1057,7 @@ return finishCreatingSPIRVType(LLVMTy, MIB); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVPointerType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVPointerType( SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass) { return getOrCreateSPIRVType( @@ -1066,7 +1066,7 @@ MIRBuilder); } -SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVPointerType( +SPIRVType *SPIRVGlobalObjectRegistry::getOrCreateSPIRVPointerType( SPIRVType *BaseType, MachineInstr &I, const SPIRVInstrInfo &TII, SPIRV::StorageClass::StorageClass SC) { Type *LLVMTy = @@ -1083,9 +1083,9 @@ return finishCreatingSPIRVType(LLVMTy, MIB); } -Register SPIRVGlobalRegistry::getOrCreateUndef(MachineInstr &I, - SPIRVType *SpvType, - const SPIRVInstrInfo &TII) { +Register +SPIRVGlobalObjectRegistry::getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, + const SPIRVInstrInfo &TII) { assert(SpvType); const Type *LLVMTy = getTypeForSPIRVType(SpvType); assert(LLVMTy); Index: llvm/lib/Target/SPIRV/SPIRVBuiltins.h =================================================================== --- llvm/lib/Target/SPIRV/SPIRVBuiltins.h +++ llvm/lib/Target/SPIRV/SPIRVBuiltins.h @@ -13,7 +13,7 @@ #ifndef LLVM_LIB_TARGET_SPIRV_SPIRVBUILTINS_H #define LLVM_LIB_TARGET_SPIRV_SPIRVBUILTINS_H -#include "SPIRVGlobalRegistry.h" +#include "Registries/SPIRVGlobalObjectRegistry.h" #include "llvm/CodeGen/GlobalISel/CallLowering.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" @@ -36,11 +36,11 @@ MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl &Args, - SPIRVGlobalRegistry *GR); + SPIRVGlobalObjectRegistry *GOR); /// Handles the translation of the provided special opaque/builtin type \p Type /// to SPIR-V type. Generates the corresponding machine instructions for the /// target type or gets the already existing OpType<...> register from the -/// global registry \p GR. +/// global registry \p GOR. /// /// \return A machine instruction representing the OpType<...> SPIR-V type. /// @@ -48,7 +48,7 @@ SPIRVType *lowerBuiltinType(const Type *Type, AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR); + SPIRVGlobalObjectRegistry *GOR); } // namespace SPIRV } // namespace llvm #endif // LLVM_LIB_TARGET_SPIRV_SPIRVBUILTINS_H Index: llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp =================================================================== --- llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp +++ llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp @@ -274,16 +274,16 @@ /// \returns Tuple of the resulting register and its type. 
static std::tuple buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { LLT Type; - SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder); + SPIRVType *BoolType = GOR->getOrCreateSPIRVBoolType(MIRBuilder); if (ResultType->getOpcode() == SPIRV::OpTypeVector) { unsigned VectorElements = ResultType->getOperand(2).getImm(); BoolType = - GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder); + GOR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder); const FixedVectorType *LLVMVectorType = - cast(GR->getTypeForSPIRVType(BoolType)); + cast(GOR->getTypeForSPIRVType(BoolType)); Type = LLT::vector(LLVMVectorType->getElementCount(), 1); } else { Type = LLT::scalar(1); @@ -292,7 +292,7 @@ Register ResultRegister = MIRBuilder.getMRI()->createGenericVirtualRegister(Type); MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF()); return std::make_tuple(ResultRegister, BoolType); } @@ -301,17 +301,17 @@ static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { Register TrueConst, FalseConst; if (ReturnType->getOpcode() == SPIRV::OpTypeVector) { - unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType); + unsigned Bits = GOR->getScalarOrVectorBitWidth(ReturnType); uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue(); - TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType); - FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType); + TrueConst = GOR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType); + FalseConst = GOR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType); } else { - TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType); - FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType); + TrueConst = GOR->buildConstantInt(1, MIRBuilder, ReturnType); + FalseConst = GOR->buildConstantInt(0, MIRBuilder, ReturnType); } return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst, FalseConst); @@ -321,13 +321,13 @@ /// \p DestinationReg. static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR, LLT LowLevelType, + SPIRVGlobalObjectRegistry *GOR, LLT LowLevelType, Register DestinationReg = Register(0)) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); if (!DestinationReg.isValid()) { DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); MRI->setType(DestinationReg, LLT::scalar(32)); - GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF()); } // TODO: consider using correct address space and alignment (p0 is canonical // type for selection though). @@ -340,27 +340,27 @@ /// variable of \p BuiltinValue value. 
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, - SPIRVGlobalRegistry *GR, + SPIRVGlobalObjectRegistry *GOR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg = Register(0)) { Register NewRegister = MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass); MIRBuilder.getMRI()->setType(NewRegister, - LLT::pointer(0, GR->getPointerSize())); - SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType( + LLT::pointer(0, GOR->getPointerSize())); + SPIRVType *PtrType = GOR->getOrCreateSPIRVPointerType( VariableType, MIRBuilder, SPIRV::StorageClass::Input); - GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF()); // Set up the global OpVariable with the necessary builtin decorations. - Register Variable = GR->buildGlobalVariable( + Register Variable = GOR->buildGlobalVariable( NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr, SPIRV::StorageClass::Input, nullptr, true, true, SPIRV::LinkageType::Import, MIRBuilder, false); // Load the value from the global variable. Register LoadedRegister = - buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg); + buildLoadInst(VariableType, Variable, MIRBuilder, GOR, LLType, Reg); MIRBuilder.getMRI()->setType(LoadedRegister, LLType); return LoadedRegister; } @@ -371,7 +371,7 @@ /// SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in /// SPIRVPreLegalizer.cpp. extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, - SPIRVGlobalRegistry *GR, + SPIRVGlobalObjectRegistry *GOR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI); @@ -411,16 +411,16 @@ } static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR, + SPIRVGlobalObjectRegistry *GOR, unsigned BitWidth = 32) { - SPIRVType *IntType = GR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder); - return GR->buildConstantInt(Val, MIRBuilder, IntType); + SPIRVType *IntType = GOR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder); + return GOR->buildConstantInt(Val, MIRBuilder, IntType); } static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR, + SPIRVGlobalObjectRegistry *GOR, MachineRegisterInfo *MRI) { if (CLScopeRegister.isValid()) { auto CLScope = @@ -432,27 +432,27 @@ return CLScopeRegister; } } - return buildConstantIntReg(Scope, MIRBuilder, GR); + return buildConstantIntReg(Scope, MIRBuilder, GOR); } static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { if (SemanticsRegister.isValid()) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); std::memory_order Order = static_cast(getIConstVal(SemanticsRegister, MRI)); - Semantics = - getSPIRVMemSemantics(Order) | - getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister)); + Semantics = getSPIRVMemSemantics(Order) | + getMemSemanticsForStorageClass( + GOR->getPointerStorageClass(PtrRegister)); if (Order == Semantics) { MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass); return SemanticsRegister; } } - return buildConstantIntReg(Semantics, MIRBuilder, GR); + return buildConstantIntReg(Semantics, MIRBuilder, GOR); } /// Helper function for translating atomic init to OpStore. @@ -471,7 +471,7 @@ /// Helper function for building an atomic load instruction. 
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { Register PtrRegister = Call->Arguments[0]; MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass); // TODO: if true insert call to __translate_ocl_memory_sccope before @@ -482,7 +482,7 @@ ScopeRegister = Call->Arguments[1]; MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass); } else - ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR); + ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GOR); Register MemSemanticsReg; if (Call->Arguments.size() > 2) { @@ -490,15 +490,15 @@ MemSemanticsReg = Call->Arguments[2]; MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass); } else { - int Semantics = - SPIRV::MemorySemantics::SequentiallyConsistent | - getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister)); - MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR); + int Semantics = SPIRV::MemorySemantics::SequentiallyConsistent | + getMemSemanticsForStorageClass( + GOR->getPointerStorageClass(PtrRegister)); + MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GOR); } MIRBuilder.buildInstr(SPIRV::OpAtomicLoad) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(PtrRegister) .addUse(ScopeRegister) .addUse(MemSemanticsReg); @@ -508,15 +508,15 @@ /// Helper function for building an atomic store instruction. static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { Register ScopeRegister = - buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR); + buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GOR); Register PtrRegister = Call->Arguments[0]; MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass); int Semantics = SPIRV::MemorySemantics::SequentiallyConsistent | - getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister)); - Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR); + getMemSemanticsForStorageClass(GOR->getPointerStorageClass(PtrRegister)); + Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GOR); MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); MIRBuilder.buildInstr(SPIRV::OpAtomicStore) .addUse(PtrRegister) @@ -529,7 +529,7 @@ /// Helper function for building an atomic compare-exchange instruction. 
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; unsigned Opcode = SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode; @@ -542,17 +542,17 @@ MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass); MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass); MRI->setRegClass(Desired, &SPIRV::IDRegClass); - SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired); + SPIRVType *SpvDesiredTy = GOR->getSPIRVTypeForVReg(Desired); LLT DesiredLLT = MRI->getType(Desired); - assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() == + assert(GOR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() == SPIRV::OpTypePointer); - unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode(); + unsigned ExpectedType = GOR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode(); assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt : ExpectedType == SPIRV::OpTypePointer); - assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt)); + assert(GOR->isScalarOfType(Desired, SPIRV::OpTypeInt)); - SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr); + SPIRVType *SpvObjectPtrTy = GOR->getSPIRVTypeForVReg(ObjectPtr); assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected"); auto StorageClass = static_cast( SpvObjectPtrTy->getOperand(1).getImm()); @@ -585,9 +585,9 @@ MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass); } if (!MemSemEqualReg.isValid()) - MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR); + MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GOR); if (!MemSemUnequalReg.isValid()) - MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GR); + MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GOR); Register ScopeReg; auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device; @@ -602,23 +602,23 @@ MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass); } if (!ScopeReg.isValid()) - ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR); + ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GOR); Register Expected = IsCmpxchg ? ExpectedArg : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder, - GR, LLT::scalar(32)); + GOR, LLT::scalar(32)); MRI->setType(Expected, DesiredLLT); Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT) : Call->ReturnRegister; if (!MRI->getRegClassOrNull(Tmp)) MRI->setRegClass(Tmp, &SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF()); - SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder); + SPIRVType *IntTy = GOR->getOrCreateSPIRVIntegerType(32, MIRBuilder); MIRBuilder.buildInstr(Opcode) .addDef(Tmp) - .addUse(GR->getSPIRVTypeID(IntTy)) + .addUse(GOR->getSPIRVTypeID(IntTy)) .addUse(ObjectPtr) .addUse(ScopeReg) .addUse(MemSemEqualReg) @@ -635,7 +635,7 @@ /// Helper function for building an atomic load instruction. static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); Register ScopeRegister = Call->Arguments.size() >= 4 ? 
Call->Arguments[3] : Register(); @@ -643,7 +643,7 @@ assert(Call->Arguments.size() <= 4 && "Too many args for explicit atomic RMW"); ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup, - MIRBuilder, GR, MRI); + MIRBuilder, GOR, MRI); Register PtrRegister = Call->Arguments[0]; unsigned Semantics = SPIRV::MemorySemantics::None; @@ -651,11 +651,11 @@ Register MemSemanticsReg = Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register(); MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister, - Semantics, MIRBuilder, GR); + Semantics, MIRBuilder, GOR); MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(PtrRegister) .addUse(ScopeRegister) .addUse(MemSemanticsReg) @@ -667,14 +667,14 @@ /// OpAtomicFlagTestAndSet). static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); Register PtrRegister = Call->Arguments[0]; unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent; Register MemSemanticsReg = Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register(); MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister, - Semantics, MIRBuilder, GR); + Semantics, MIRBuilder, GOR); assert((Opcode != SPIRV::OpAtomicFlagClear || (Semantics != SPIRV::MemorySemantics::Acquire && @@ -684,12 +684,12 @@ Register ScopeRegister = Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register(); ScopeRegister = - buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI); + buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GOR, MRI); auto MIB = MIRBuilder.buildInstr(Opcode); if (Opcode == SPIRV::OpAtomicFlagTestAndSet) MIB.addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)); + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)); MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg); return true; @@ -699,7 +699,7 @@ /// operations. static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI); unsigned MemSemantics = SPIRV::MemorySemantics::None; @@ -726,7 +726,7 @@ MemSemanticsReg = Call->Arguments[0]; MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass); } else - MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR); + MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GOR); Register ScopeReg; SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup; @@ -752,11 +752,11 @@ } if (!ScopeReg.isValid()) - ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR); + ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GOR); auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg); if (Opcode != SPIRV::OpMemoryBarrier) - MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GR)); + MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GOR)); MIB.addUse(MemSemanticsReg); return true; } @@ -792,7 +792,7 @@ static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the extended instruction number in the TableGen records. 
const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; uint32_t Number = @@ -802,7 +802,7 @@ auto MIB = MIRBuilder.buildInstr(SPIRV::OpExtInst) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addImm(static_cast(SPIRV::InstructionSet::OpenCL_std)) .addImm(Number); @@ -813,7 +813,7 @@ static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the instruction opcode in the TableGen records. const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; unsigned Opcode = @@ -822,24 +822,24 @@ Register CompareRegister; SPIRVType *RelationType; std::tie(CompareRegister, RelationType) = - buildBoolRegister(MIRBuilder, Call->ReturnType, GR); + buildBoolRegister(MIRBuilder, Call->ReturnType, GOR); // Build relational instruction. auto MIB = MIRBuilder.buildInstr(Opcode) .addDef(CompareRegister) - .addUse(GR->getSPIRVTypeID(RelationType)); + .addUse(GOR->getSPIRVTypeID(RelationType)); for (auto Argument : Call->Arguments) MIB.addUse(Argument); // Build select instruction. return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister, - Call->ReturnType, GR); + Call->ReturnType, GOR); } static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; const SPIRV::GroupBuiltin *GroupBuiltin = SPIRV::lookupGroupBuiltin(Builtin->Name); @@ -851,10 +851,10 @@ // TODO: support non-constant bool values. assert(ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT && "Only constant bool value args are supported"); - if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() != + if (GOR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() != SPIRV::OpTypeBool) - Arg0 = GR->buildConstantInt(getIConstVal(ConstRegister, MRI), MIRBuilder, - GR->getOrCreateSPIRVBoolType(MIRBuilder)); + Arg0 = GOR->buildConstantInt(getIConstVal(ConstRegister, MRI), MIRBuilder, + GOR->getOrCreateSPIRVBoolType(MIRBuilder)); } Register GroupResultRegister = Call->ReturnRegister; @@ -869,16 +869,16 @@ if (HasBoolReturnTy) std::tie(GroupResultRegister, GroupResultType) = - buildBoolRegister(MIRBuilder, Call->ReturnType, GR); + buildBoolRegister(MIRBuilder, Call->ReturnType, GOR); auto Scope = Builtin->Name.startswith("sub_group") ? SPIRV::Scope::Subgroup : SPIRV::Scope::Workgroup; - Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR); + Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GOR); // Build work/sub group instruction. auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode) .addDef(GroupResultRegister) - .addUse(GR->getSPIRVTypeID(GroupResultType)) + .addUse(GOR->getSPIRVTypeID(GroupResultType)) .addUse(ScopeRegister); if (!GroupBuiltin->NoGroupOperation) @@ -895,7 +895,7 @@ // Build select instruction. if (HasBoolReturnTy) buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister, - Call->ReturnType, GR); + Call->ReturnType, GOR); return true; } @@ -927,14 +927,14 @@ // extend or truncate. 
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR, + SPIRVGlobalObjectRegistry *GOR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue) { Register IndexRegister = Call->Arguments[0]; const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm(); - const unsigned PointerSize = GR->getPointerSize(); + const unsigned PointerSize = GOR->getPointerSize(); const SPIRVType *PointerSizeType = - GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder); + GOR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder); MachineRegisterInfo *MRI = MIRBuilder.getMRI(); auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI); @@ -952,25 +952,26 @@ if (PointerSize != ResultWidth) { DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize)); MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg, - MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg, + MIRBuilder.getMF()); ToTruncate = DefaultReg; } auto NewRegister = - GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType); + GOR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType); MIRBuilder.buildCopy(DefaultReg, NewRegister); } else { // If it could be in range, we need to load from the given builtin. auto Vec3Ty = - GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder); + GOR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder); Register LoadedVector = - buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue, + buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GOR, BuiltinValue, LLT::fixed_vector(3, PointerSize)); // Set up the vreg to extract the result to (possibly a new temporary one). Register Extracted = Call->ReturnRegister; if (!IsConstantIndex || PointerSize != ResultWidth) { Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize)); MRI->setRegClass(Extracted, &SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, + MIRBuilder.getMF()); } // Use Intrinsic::spv_extractelt so dynamic vs static extraction is // handled later: extr = spv_extractelt LoadedVector, IndexRegister. @@ -980,25 +981,25 @@ // If the index is dynamic, need check if it's < 3, and then use a select. if (!IsConstantIndex) { - insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder, + insertAssignInstr(Extracted, nullptr, PointerSizeType, GOR, MIRBuilder, *MRI); - auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister); - auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder); + auto IndexType = GOR->getSPIRVTypeForVReg(IndexRegister); + auto BoolType = GOR->getOrCreateSPIRVBoolType(MIRBuilder); Register CompareRegister = MRI->createGenericVirtualRegister(LLT::scalar(1)); MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF()); // Use G_ICMP to check if idxVReg < 3. MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister, - GR->buildConstantInt(3, MIRBuilder, IndexType)); + GOR->buildConstantInt(3, MIRBuilder, IndexType)); // Get constant for the default value (0 or 1 depending on which // function). 
Register DefaultRegister = - GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType); + GOR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType); // Get a register for the selection result (possibly a new temporary one). Register SelectionResult = Call->ReturnRegister; @@ -1006,8 +1007,8 @@ SelectionResult = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize)); MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult, - MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult, + MIRBuilder.getMF()); } // Create the final G_SELECT to return the extracted value or the default. MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted, @@ -1025,17 +1026,17 @@ static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the builtin variable record. const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; SPIRV::BuiltIn::BuiltIn Value = SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value; if (Value == SPIRV::BuiltIn::GlobalInvocationId) - return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0); + return genWorkgroupQuery(Call, MIRBuilder, GOR, Value, 0); // Build a load instruction for the builtin variable. - unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType); + unsigned BitWidth = GOR->getScalarOrVectorBitWidth(Call->ReturnType); LLT LLType; if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector) LLType = @@ -1043,13 +1044,13 @@ else LLType = LLT::scalar(BitWidth); - return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value, + return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GOR, Value, LLType, Call->ReturnRegister); } static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the instruction opcode in the TableGen records. 
const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; unsigned Opcode = @@ -1059,24 +1060,24 @@ case SPIRV::OpStore: return buildAtomicInitInst(Call, MIRBuilder); case SPIRV::OpAtomicLoad: - return buildAtomicLoadInst(Call, MIRBuilder, GR); + return buildAtomicLoadInst(Call, MIRBuilder, GOR); case SPIRV::OpAtomicStore: - return buildAtomicStoreInst(Call, MIRBuilder, GR); + return buildAtomicStoreInst(Call, MIRBuilder, GOR); case SPIRV::OpAtomicCompareExchange: case SPIRV::OpAtomicCompareExchangeWeak: - return buildAtomicCompareExchangeInst(Call, MIRBuilder, GR); + return buildAtomicCompareExchangeInst(Call, MIRBuilder, GOR); case SPIRV::OpAtomicIAdd: case SPIRV::OpAtomicISub: case SPIRV::OpAtomicOr: case SPIRV::OpAtomicXor: case SPIRV::OpAtomicAnd: case SPIRV::OpAtomicExchange: - return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR); + return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GOR); case SPIRV::OpMemoryBarrier: - return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR); + return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GOR); case SPIRV::OpAtomicFlagTestAndSet: case SPIRV::OpAtomicFlagClear: - return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR); + return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GOR); default: return false; } @@ -1084,24 +1085,24 @@ static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the instruction opcode in the TableGen records. const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; unsigned Opcode = SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode; - return buildBarrierInst(Call, Opcode, MIRBuilder, GR); + return buildBarrierInst(Call, Opcode, MIRBuilder, GOR); } static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { - unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode(); + SPIRVGlobalObjectRegistry *GOR) { + unsigned Opcode = GOR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode(); bool IsVec = Opcode == SPIRV::OpTypeVector; // Use OpDot only in case of vector args and OpFMul in case of scalar args. MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(Call->Arguments[0]) .addUse(Call->Arguments[1]); return true; @@ -1109,19 +1110,19 @@ static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the builtin record. SPIRV::BuiltIn::BuiltIn Value = SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value; uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize || Value == SPIRV::BuiltIn::WorkgroupSize || Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize); - return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0); + return genWorkgroupQuery(Call, MIRBuilder, GOR, Value, IsDefault ? 1 : 0); } static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the image size query component number in the TableGen records. const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; uint32_t Component = @@ -1134,7 +1135,7 @@ ? RetTy->getOperand(2).getImm() : 1; // Get the actual number of query result/size components. 
-  SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
+  SPIRVType *ImgType = GOR->getSPIRVTypeForVReg(Call->Arguments[0]);
  unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
  Register QueryResult = Call->ReturnRegister;
  SPIRVType *QueryResultType = Call->ReturnType;
@@ -1142,10 +1143,11 @@
    QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
        LLT::fixed_vector(NumActualRetComponents, 32));
    MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
-    SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
-    QueryResultType = GR->getOrCreateSPIRVVectorType(
+    SPIRVType *IntTy = GOR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
+    QueryResultType = GOR->getOrCreateSPIRVVectorType(
        IntTy, NumActualRetComponents, MIRBuilder);
-    GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
+    GOR->assignSPIRVTypeToVReg(QueryResultType, QueryResult,
+                               MIRBuilder.getMF());
  }
  bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
  unsigned Opcode =
@@ -1153,10 +1155,10 @@
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(QueryResult)
-                 .addUse(GR->getSPIRVTypeID(QueryResultType))
+                 .addUse(GOR->getSPIRVTypeID(QueryResultType))
                 .addUse(Call->Arguments[0]);
  if (!IsDimBuf)
-    MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Lod id.
+    MIB.addUse(buildConstantIntReg(0, MIRBuilder, GOR)); // Lod id.
  if (NumExpectedRetComponents == NumActualRetComponents)
    return true;
  if (NumExpectedRetComponents == 1) {
@@ -1167,14 +1169,14 @@
           "Invalid composite index!");
    MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
        .addDef(Call->ReturnRegister)
-        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
+        .addUse(GOR->getSPIRVTypeID(Call->ReturnType))
        .addUse(QueryResult)
        .addImm(ExtractedComposite);
  } else {
    // More than 1 component is expected, fill a new vector.
auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(QueryResult) .addUse(QueryResult); for (unsigned i = 0; i < NumExpectedRetComponents; ++i) @@ -1185,7 +1187,7 @@ static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt && "Image samples query result must be of int type!"); @@ -1197,7 +1199,7 @@ Register Image = Call->Arguments[0]; MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass); SPIRV::Dim::Dim ImageDimensionality = static_cast( - GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm()); + GOR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm()); switch (Opcode) { case SPIRV::OpImageQuerySamples: @@ -1215,7 +1217,7 @@ MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(Image); return true; } @@ -1255,7 +1257,7 @@ static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { Register Image = Call->Arguments[0]; MachineRegisterInfo *MRI = MIRBuilder.getMRI(); MRI->setRegClass(Image, &SPIRV::IDRegClass); @@ -1267,43 +1269,43 @@ if (HasOclSampler) { Register Sampler = Call->Arguments[1]; - if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) && + if (!GOR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) && getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) { uint64_t SamplerMask = getIConstVal(Sampler, MRI); - Sampler = GR->buildConstantSampler( + Sampler = GOR->buildConstantSampler( Register(), getSamplerAddressingModeFromBitmask(SamplerMask), getSamplerParamFromBitmask(SamplerMask), getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder, - GR->getSPIRVTypeForVReg(Sampler)); + GOR->getSPIRVTypeForVReg(Sampler)); } - SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image); + SPIRVType *ImageType = GOR->getSPIRVTypeForVReg(Image); SPIRVType *SampledImageType = - GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder); + GOR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder); Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass); MIRBuilder.buildInstr(SPIRV::OpSampledImage) .addDef(SampledImage) - .addUse(GR->getSPIRVTypeID(SampledImageType)) + .addUse(GOR->getSPIRVTypeID(SampledImageType)) .addUse(Image) .addUse(Sampler); - Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()), - MIRBuilder); + Register Lod = GOR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()), + MIRBuilder); SPIRVType *TempType = Call->ReturnType; bool NeedsExtraction = false; if (TempType->getOpcode() != SPIRV::OpTypeVector) { TempType = - GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder); + GOR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder); NeedsExtraction = true; } - LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType)); + LLT LLType = LLT::scalar(GOR->getScalarOrVectorBitWidth(TempType)); Register TempRegister = MRI->createGenericVirtualRegister(LLType); MRI->setRegClass(TempRegister, &SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF()); 
MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod) .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(TempType)) + .addUse(GOR->getSPIRVTypeID(TempType)) .addUse(SampledImage) .addUse(Call->Arguments[2]) // Coordinate. .addImm(SPIRV::ImageOperand::Lod) @@ -1312,13 +1314,13 @@ if (NeedsExtraction) MIRBuilder.buildInstr(SPIRV::OpCompositeExtract) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(TempRegister) .addImm(0); } else if (HasMsaa) { MIRBuilder.buildInstr(SPIRV::OpImageRead) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(Image) .addUse(Call->Arguments[1]) // Coordinate. .addImm(SPIRV::ImageOperand::Sample) @@ -1326,7 +1328,7 @@ } else { MIRBuilder.buildInstr(SPIRV::OpImageRead) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(Image) .addUse(Call->Arguments[1]); // Coordinate. } @@ -1335,7 +1337,7 @@ static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass); @@ -1349,13 +1351,13 @@ static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); if (Call->Builtin->Name.contains_insensitive( "__translate_sampler_initializer")) { // Build sampler literal. uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI); - Register Sampler = GR->buildConstantSampler( + Register Sampler = GOR->buildConstantSampler( Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask), getSamplerParamFromBitmask(Bitmask), getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType); @@ -1363,16 +1365,16 @@ } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) { // Create OpSampledImage. Register Image = Call->Arguments[0]; - SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image); + SPIRVType *ImageType = GOR->getSPIRVTypeForVReg(Image); SPIRVType *SampledImageType = - GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder); + GOR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder); Register SampledImage = Call->ReturnRegister.isValid() ? Call->ReturnRegister : MRI->createVirtualRegister(&SPIRV::IDRegClass); MIRBuilder.buildInstr(SPIRV::OpSampledImage) .addDef(SampledImage) - .addUse(GR->getSPIRVTypeID(SampledImageType)) + .addUse(GOR->getSPIRVTypeID(SampledImageType)) .addUse(Image) .addUse(Call->Arguments[1]); // Sampler. 
return true; @@ -1384,14 +1386,14 @@ ReturnType = ReturnType.substr(ReturnType.find("_R") + 2); ReturnType = ReturnType.substr(0, ReturnType.find('(')); } - SPIRVType *Type = GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder); + SPIRVType *Type = GOR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder); MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass); MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Type)) + .addUse(GOR->getSPIRVTypeID(Type)) .addUse(Call->Arguments[0]) // Image. .addUse(Call->Arguments[1]) // Coordinate. .addImm(SPIRV::ImageOperand::Lod) @@ -1410,7 +1412,7 @@ static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the instruction opcode in the TableGen records. const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; unsigned Opcode = @@ -1441,7 +1443,7 @@ } auto MIB = MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)); + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)); if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) { if (Const->getOpcode() == TargetOpcode::G_CONSTANT) @@ -1454,7 +1456,7 @@ case SPIRV::OpSpecConstantComposite: { auto MIB = MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)); + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)); for (unsigned i = 0; i < Call->Arguments.size(); i++) MIB.addUse(Call->Arguments[i]); return true; @@ -1466,17 +1468,17 @@ static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); - SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]); + SPIRVType *PtrType = GOR->getSPIRVTypeForVReg(Call->Arguments[0]); assert(PtrType->getOpcode() == SPIRV::OpTypePointer && PtrType->getOperand(2).isReg()); Register TypeReg = PtrType->getOperand(2).getReg(); - SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg); + SPIRVType *StructType = GOR->getSPIRVTypeForVReg(TypeReg); MachineFunction &MF = MIRBuilder.getMF(); Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF); + GOR->assignSPIRVTypeToVReg(StructType, TmpReg, MF); // Skip the first arg, it's the destination pointer. OpBuildNDRange takes // three other arguments, so pass zero constant on absence. unsigned NumArgs = Call->Arguments.size(); @@ -1492,7 +1494,7 @@ MRI->setRegClass(GlobalWorkOffset, &SPIRV::IDRegClass); if (NumArgs < 4) { Register Const; - SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize); + SPIRVType *SpvTy = GOR->getSPIRVTypeForVReg(GlobalWorkSize); if (SpvTy->getOpcode() == SPIRV::OpTypePointer) { MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize); assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) && @@ -1502,19 +1504,19 @@ MRI->setRegClass(GWSPtr, &SPIRV::IDRegClass); // TODO: Maybe simplify generation of the type of the fields. unsigned Size = Call->Builtin->Name.equals("ndrange_3D") ? 3 : 2; - unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32; + unsigned BitWidth = GOR->getPointerSize() == 64 ? 
64 : 32; Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth); Type *FieldTy = ArrayType::get(BaseTy, Size); - SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder); + SPIRVType *SpvFieldTy = GOR->getOrCreateSPIRVType(FieldTy, MIRBuilder); GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::IDRegClass); - GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF); + GOR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF); MIRBuilder.buildInstr(SPIRV::OpLoad) .addDef(GlobalWorkSize) - .addUse(GR->getSPIRVTypeID(SpvFieldTy)) + .addUse(GOR->getSPIRVTypeID(SpvFieldTy)) .addUse(GWSPtr); - Const = GR->getOrCreateConsIntArray(0, MIRBuilder, SpvFieldTy); + Const = GOR->getOrCreateConsIntArray(0, MIRBuilder, SpvFieldTy); } else { - Const = GR->buildConstantInt(0, MIRBuilder, SpvTy); + Const = GOR->buildConstantInt(0, MIRBuilder, SpvTy); } if (!LocalWorkSize.isValid()) LocalWorkSize = Const; @@ -1598,7 +1600,7 @@ // TODO: maybe move to the global register. static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext(); Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent"); if (!OpaqueType) @@ -1608,16 +1610,16 @@ unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function); unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic); Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1); - return GR->getOrCreateSPIRVType(PtrType, MIRBuilder); + return GOR->getOrCreateSPIRVType(PtrType, MIRBuilder); } static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { MachineRegisterInfo *MRI = MIRBuilder.getMRI(); const DataLayout &DL = MIRBuilder.getDataLayout(); bool HasEvents = Call->Builtin->Name.find("events") != StringRef::npos; - const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder); + const SPIRVType *Int32Ty = GOR->getOrCreateSPIRVIntegerType(32, MIRBuilder); // Make vararg instructions before OpEnqueueKernel. // Local sizes arguments: Sizes of block invoke arguments. Clang generates @@ -1636,20 +1638,20 @@ const uint64_t LocalSizeNum = cast(LocalSizeTy)->getNumElements(); unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic); - const LLT LLType = LLT::pointer(SC, GR->getPointerSize()); - const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType( + const LLT LLType = LLT::pointer(SC, GOR->getPointerSize()); + const SPIRVType *PointerSizeTy = GOR->getOrCreateSPIRVPointerType( Int32Ty, MIRBuilder, SPIRV::StorageClass::Function); for (unsigned I = 0; I < LocalSizeNum; ++I) { Register Reg = MRI->createVirtualRegister(&SPIRV::IDRegClass); MRI->setType(Reg, LLType); - GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF()); + GOR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF()); auto GEPInst = MIRBuilder.buildIntrinsic(Intrinsic::spv_gep, ArrayRef{Reg}, true); GEPInst - .addImm(GepMI->getOperand(2).getImm()) // In bound. - .addUse(ArrayMI->getOperand(0).getReg()) // Alloca. - .addUse(buildConstantIntReg(0, MIRBuilder, GR)) // Indices. - .addUse(buildConstantIntReg(I, MIRBuilder, GR)); + .addImm(GepMI->getOperand(2).getImm()) // In bound. + .addUse(ArrayMI->getOperand(0).getReg()) // Alloca. + .addUse(buildConstantIntReg(0, MIRBuilder, GOR)) // Indices. 
+ .addUse(buildConstantIntReg(I, MIRBuilder, GOR)); LocalSizes.push_back(Reg); } } @@ -1657,7 +1659,7 @@ // SPIRV OpEnqueueKernel instruction has 10+ arguments. auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Int32Ty)); + .addUse(GOR->getSPIRVTypeID(Int32Ty)); // Copy all arguments before block invoke function pointer. const unsigned BlockFIdx = HasEvents ? 6 : 3; @@ -1666,9 +1668,9 @@ // If there are no event arguments in the original call, add dummy ones. if (!HasEvents) { - MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Dummy num events. - Register NullPtr = GR->getOrCreateConstNullPtr( - MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR)); + MIB.addUse(buildConstantIntReg(0, MIRBuilder, GOR)); // Dummy num events. + Register NullPtr = GOR->getOrCreateConstNullPtr( + MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GOR)); MIB.addUse(NullPtr); // Dummy wait events. MIB.addUse(NullPtr); // Dummy ret event. } @@ -1685,10 +1687,10 @@ Type *PType = const_cast(getBlockStructType(BlockLiteralReg, MRI)); // TODO: these numbers should be obtained from block literal structure. // Param Size: Size of block literal structure. - MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GR)); + MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GOR)); // Param Aligment: Aligment of block literal structure. MIB.addUse( - buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GR)); + buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GOR)); for (unsigned i = 0; i < LocalSizes.size(); i++) MIB.addUse(LocalSizes[i]); @@ -1697,7 +1699,7 @@ static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the instruction opcode in the TableGen records. const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; unsigned Opcode = @@ -1712,12 +1714,12 @@ case SPIRV::OpGetDefaultQueue: return MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)); + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)); case SPIRV::OpIsValidEvent: MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); return MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(Call->Arguments[0]); case SPIRV::OpSetUserEventStatus: MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); @@ -1734,9 +1736,9 @@ .addUse(Call->Arguments[1]) .addUse(Call->Arguments[2]); case SPIRV::OpBuildNDRange: - return buildNDRange(Call, MIRBuilder, GR); + return buildNDRange(Call, MIRBuilder, GOR); case SPIRV::OpEnqueueKernel: - return buildEnqueueKernel(Call, MIRBuilder, GR); + return buildEnqueueKernel(Call, MIRBuilder, GOR); default: return false; } @@ -1744,23 +1746,23 @@ static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the instruction opcode in the TableGen records. 
const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; unsigned Opcode = SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode; - auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR); + auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GOR); switch (Opcode) { case SPIRV::OpGroupAsyncCopy: return MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(Scope) .addUse(Call->Arguments[0]) .addUse(Call->Arguments[1]) .addUse(Call->Arguments[2]) - .addUse(buildConstantIntReg(1, MIRBuilder, GR)) + .addUse(buildConstantIntReg(1, MIRBuilder, GOR)) .addUse(Call->Arguments[3]); case SPIRV::OpGroupWaitEvents: return MIRBuilder.buildInstr(Opcode) @@ -1775,7 +1777,7 @@ static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the conversion builtin in the TableGen records. const SPIRV::ConvertBuiltin *Builtin = SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set); @@ -1789,9 +1791,9 @@ {(unsigned)Builtin->RoundingMode}); unsigned Opcode = SPIRV::OpNop; - if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) { + if (GOR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) { // Int -> ... - if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) { + if (GOR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) { // Int -> Int if (Builtin->IsSaturated) Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS @@ -1799,22 +1801,22 @@ else Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert : SPIRV::OpSConvert; - } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister, - SPIRV::OpTypeFloat)) { + } else if (GOR->isScalarOrVectorOfType(Call->ReturnRegister, + SPIRV::OpTypeFloat)) { // Int -> Float bool IsSourceSigned = DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u'; Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF; } - } else if (GR->isScalarOrVectorOfType(Call->Arguments[0], - SPIRV::OpTypeFloat)) { + } else if (GOR->isScalarOrVectorOfType(Call->Arguments[0], + SPIRV::OpTypeFloat)) { // Float -> ... - if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) + if (GOR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) // Float -> Int Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS : SPIRV::OpConvertFToU; - else if (GR->isScalarOrVectorOfType(Call->ReturnRegister, - SPIRV::OpTypeFloat)) + else if (GOR->isScalarOrVectorOfType(Call->ReturnRegister, + SPIRV::OpTypeFloat)) // Float -> Float Opcode = SPIRV::OpFConvert; } @@ -1824,14 +1826,14 @@ MIRBuilder.buildInstr(Opcode) .addDef(Call->ReturnRegister) - .addUse(GR->getSPIRVTypeID(Call->ReturnType)) + .addUse(GOR->getSPIRVTypeID(Call->ReturnType)) .addUse(Call->Arguments[0]); return true; } static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Lookup the vector load/store builtin in the TableGen records. 
  const SPIRV::VectorLoadStoreBuiltin *Builtin =
      SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
@@ -1840,7 +1842,7 @@
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
-          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
+          .addUse(GOR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Builtin->Number);
  for (auto Argument : Call->Arguments)
@@ -1855,7 +1857,7 @@
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
-                                  SPIRVGlobalRegistry *GR) {
+                                  SPIRVGlobalObjectRegistry *GOR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
@@ -1865,7 +1867,7 @@
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsLoad) {
    MIB.addDef(Call->ReturnRegister);
-    MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
+    MIB.addUse(GOR->getSPIRVTypeID(Call->ReturnType));
  }
  // Add a pointer to the value to load/store.
  MIB.addUse(Call->Arguments[0]);
@@ -1897,20 +1899,20 @@
                            MachineIRBuilder &MIRBuilder, const Register OrigRet,
                            const Type *OrigRetTy,
                            const SmallVectorImpl<Register> &Args,
-                            SPIRVGlobalRegistry *GR) {
+                            SPIRVGlobalObjectRegistry *GOR) {
  LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");

  // SPIR-V type and return register.
  Register ReturnRegister = OrigRet;
  SPIRVType *ReturnType = nullptr;
  if (OrigRetTy && !OrigRetTy->isVoidTy()) {
-    ReturnType = GR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
+    ReturnType = GOR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
    if (!MIRBuilder.getMRI()->getRegClassOrNull(ReturnRegister))
      MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::IDRegClass);
  } else if (OrigRetTy && OrigRetTy->isVoidTy()) {
    ReturnRegister = MIRBuilder.getMRI()->createVirtualRegister(&IDRegClass);
    MIRBuilder.getMRI()->setType(ReturnRegister, LLT::scalar(32));
-    ReturnType = GR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
+    ReturnType = GOR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
  }

  // Lookup the builtin in the TableGen records.
@@ -1931,45 +1933,45 @@
  // Match the builtin with implementation based on the grouping.
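  // Editorial aside (illustrative sketch, not part of this patch): every
  // generate*Inst helper now receives the registry as
  // SPIRVGlobalObjectRegistry *GOR, so the dispatch below simply forwards the
  // renamed pointer. For example, an atomic builtin resolved by the lookup
  // above would be lowered as:
  //
  //   if (Call->Builtin->Group == SPIRV::Atomic)
  //     return generateAtomicInst(Call.get(), MIRBuilder, GOR);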
switch (Call->Builtin->Group) { case SPIRV::Extended: - return generateExtInst(Call.get(), MIRBuilder, GR); + return generateExtInst(Call.get(), MIRBuilder, GOR); case SPIRV::Relational: - return generateRelationalInst(Call.get(), MIRBuilder, GR); + return generateRelationalInst(Call.get(), MIRBuilder, GOR); case SPIRV::Group: - return generateGroupInst(Call.get(), MIRBuilder, GR); + return generateGroupInst(Call.get(), MIRBuilder, GOR); case SPIRV::Variable: - return generateBuiltinVar(Call.get(), MIRBuilder, GR); + return generateBuiltinVar(Call.get(), MIRBuilder, GOR); case SPIRV::Atomic: - return generateAtomicInst(Call.get(), MIRBuilder, GR); + return generateAtomicInst(Call.get(), MIRBuilder, GOR); case SPIRV::Barrier: - return generateBarrierInst(Call.get(), MIRBuilder, GR); + return generateBarrierInst(Call.get(), MIRBuilder, GOR); case SPIRV::Dot: - return generateDotOrFMulInst(Call.get(), MIRBuilder, GR); + return generateDotOrFMulInst(Call.get(), MIRBuilder, GOR); case SPIRV::GetQuery: - return generateGetQueryInst(Call.get(), MIRBuilder, GR); + return generateGetQueryInst(Call.get(), MIRBuilder, GOR); case SPIRV::ImageSizeQuery: - return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR); + return generateImageSizeQueryInst(Call.get(), MIRBuilder, GOR); case SPIRV::ImageMiscQuery: - return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR); + return generateImageMiscQueryInst(Call.get(), MIRBuilder, GOR); case SPIRV::ReadImage: - return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR); + return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GOR); case SPIRV::WriteImage: - return generateWriteImageInst(Call.get(), MIRBuilder, GR); + return generateWriteImageInst(Call.get(), MIRBuilder, GOR); case SPIRV::SampleImage: - return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR); + return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GOR); case SPIRV::Select: return generateSelectInst(Call.get(), MIRBuilder); case SPIRV::SpecConstant: - return generateSpecConstantInst(Call.get(), MIRBuilder, GR); + return generateSpecConstantInst(Call.get(), MIRBuilder, GOR); case SPIRV::Enqueue: - return generateEnqueueInst(Call.get(), MIRBuilder, GR); + return generateEnqueueInst(Call.get(), MIRBuilder, GOR); case SPIRV::AsyncCopy: - return generateAsyncCopy(Call.get(), MIRBuilder, GR); + return generateAsyncCopy(Call.get(), MIRBuilder, GOR); case SPIRV::Convert: - return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR); + return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GOR); case SPIRV::VectorLoadStore: - return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR); + return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GOR); case SPIRV::LoadStore: - return generateLoadStoreInst(Call.get(), MIRBuilder, GR); + return generateLoadStoreInst(Call.get(), MIRBuilder, GOR); } return false; } @@ -2070,41 +2072,41 @@ static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { unsigned Opcode = TypeRecord->Opcode; // Create or get an existing type from GlobalRegistry. 
- return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode); + return GOR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode); } static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // Create or get an existing type from GlobalRegistry. - return GR->getOrCreateOpTypeSampler(MIRBuilder); + return GOR->getOrCreateOpTypeSampler(MIRBuilder); } static SPIRVType *getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { assert(ExtensionType->getNumIntParameters() == 1 && "Invalid number of parameters for SPIR-V pipe builtin!"); // Create or get an existing type from GlobalRegistry. - return GR->getOrCreateOpTypePipe(MIRBuilder, - SPIRV::AccessQualifier::AccessQualifier( - ExtensionType->getIntParameter(0))); + return GOR->getOrCreateOpTypePipe(MIRBuilder, + SPIRV::AccessQualifier::AccessQualifier( + ExtensionType->getIntParameter(0))); } static SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, - MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) { + MachineIRBuilder &MIRBuilder, SPIRVGlobalObjectRegistry *GOR) { assert(ExtensionType->getNumTypeParameters() == 1 && "SPIR-V image builtin type must have sampled type parameter!"); const SPIRVType *SampledType = - GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder); + GOR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder); assert(ExtensionType->getNumIntParameters() == 7 && "Invalid number of parameters for SPIR-V image builtin!"); // Create or get an existing type from GlobalRegistry. - return GR->getOrCreateOpTypeImage( + return GOR->getOrCreateOpTypeImage( MIRBuilder, SampledType, SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)), ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2), @@ -2118,18 +2120,18 @@ static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { SPIRVType *OpaqueImageType = getImageType( - OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR); + OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GOR); // Create or get an existing type from GlobalRegistry. - return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder); + return GOR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder); } namespace SPIRV { SPIRVType *lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalObjectRegistry *GOR) { // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either // target(...) target extension types or pointers-to-opaque-structs. 
The // approach relying on structs is deprecated and works only in the non-opaque @@ -2159,30 +2161,30 @@ SPIRVType *TargetType; switch (TypeRecord->Opcode) { case SPIRV::OpTypeImage: - TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR); + TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GOR); break; case SPIRV::OpTypePipe: - TargetType = getPipeType(BuiltinType, MIRBuilder, GR); + TargetType = getPipeType(BuiltinType, MIRBuilder, GOR); break; case SPIRV::OpTypeDeviceEvent: - TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder); + TargetType = GOR->getOrCreateOpTypeDeviceEvent(MIRBuilder); break; case SPIRV::OpTypeSampler: - TargetType = getSamplerType(MIRBuilder, GR); + TargetType = getSamplerType(MIRBuilder, GOR); break; case SPIRV::OpTypeSampledImage: - TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR); + TargetType = getSampledImageType(BuiltinType, MIRBuilder, GOR); break; default: TargetType = - getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR); + getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GOR); break; } // Emit OpName instruction if a new OpType<...> instruction was added // (equivalent type was not found in GlobalRegistry). if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs()) - buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder); + buildOpName(GOR->getSPIRVTypeID(TargetType), Name, MIRBuilder); return TargetType; } Index: llvm/lib/Target/SPIRV/SPIRVCallLowering.h =================================================================== --- llvm/lib/Target/SPIRV/SPIRVCallLowering.h +++ llvm/lib/Target/SPIRV/SPIRVCallLowering.h @@ -13,21 +13,21 @@ #ifndef LLVM_LIB_TARGET_SPIRV_SPIRVCALLLOWERING_H #define LLVM_LIB_TARGET_SPIRV_SPIRVCALLLOWERING_H -#include "SPIRVGlobalRegistry.h" +#include "Registries/SPIRVGlobalObjectRegistry.h" #include "llvm/CodeGen/GlobalISel/CallLowering.h" namespace llvm { -class SPIRVGlobalRegistry; +class SPIRVGlobalObjectRegistry; class SPIRVTargetLowering; class SPIRVCallLowering : public CallLowering { private: // Used to create and assign function, argument, and return type information. - SPIRVGlobalRegistry *GR; + SPIRVGlobalObjectRegistry *GOR; public: - SPIRVCallLowering(const SPIRVTargetLowering &TLI, SPIRVGlobalRegistry *GR); + SPIRVCallLowering(const SPIRVTargetLowering &TLI, SPIRVGlobalObjectRegistry *GOR); // Built OpReturn or OpReturnValue. 
bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val, Index: llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp =================================================================== --- llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp +++ llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp @@ -13,9 +13,9 @@ #include "SPIRVCallLowering.h" #include "MCTargetDesc/SPIRVBaseInfo.h" +#include "Registries/SPIRVGlobalObjectRegistry.h" #include "SPIRV.h" #include "SPIRVBuiltins.h" -#include "SPIRVGlobalRegistry.h" #include "SPIRVISelLowering.h" #include "SPIRVRegisterInfo.h" #include "SPIRVSubtarget.h" @@ -26,8 +26,8 @@ using namespace llvm; SPIRVCallLowering::SPIRVCallLowering(const SPIRVTargetLowering &TLI, - SPIRVGlobalRegistry *GR) - : CallLowering(&TLI), GR(GR) {} + SPIRVGlobalObjectRegistry *GOR) + : CallLowering(&TLI), GOR(GOR) {} bool SPIRVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef VRegs, @@ -217,8 +217,9 @@ const Function &F, ArrayRef> VRegs, FunctionLoweringInfo &FLI) const { - assert(GR && "Must initialize the SPIRV type registry before lowering args."); - GR->setCurrentFunc(MIRBuilder.getMF()); + assert(GOR && + "Must initialize the SPIRV type registry before lowering args."); + GOR->setCurrentFunc(MIRBuilder.getMF()); // Assign types and names to all args, and store their types for later. FunctionType *FTy = getOriginalFunctionType(F); @@ -232,8 +233,8 @@ return false; SPIRV::AccessQualifier::AccessQualifier ArgAccessQual = getArgAccessQual(F, i); - auto *SpirvTy = GR->assignTypeToVReg(getArgType(F, i), VRegs[i][0], - MIRBuilder, ArgAccessQual); + auto *SpirvTy = GOR->assignTypeToVReg(getArgType(F, i), VRegs[i][0], + MIRBuilder, ArgAccessQual); ArgTypeVRegs.push_back(SpirvTy); if (Arg.hasName()) @@ -305,9 +306,10 @@ Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32)); MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass); if (F.isDeclaration()) - GR->add(&F, &MIRBuilder.getMF(), FuncVReg); - SPIRVType *RetTy = GR->getOrCreateSPIRVType(FTy->getReturnType(), MIRBuilder); - SPIRVType *FuncTy = GR->getOrCreateOpTypeFunctionWithArgs( + GOR->add(&F, &MIRBuilder.getMF(), FuncVReg); + SPIRVType *RetTy = + GOR->getOrCreateSPIRVType(FTy->getReturnType(), MIRBuilder); + SPIRVType *FuncTy = GOR->getOrCreateOpTypeFunctionWithArgs( FTy, RetTy, ArgTypeVRegs, MIRBuilder); // Build the OpTypeFunction declaring it. @@ -315,9 +317,9 @@ MIRBuilder.buildInstr(SPIRV::OpFunction) .addDef(FuncVReg) - .addUse(GR->getSPIRVTypeID(RetTy)) + .addUse(GOR->getSPIRVTypeID(RetTy)) .addImm(FuncControl) - .addUse(GR->getSPIRVTypeID(FuncTy)); + .addUse(GOR->getSPIRVTypeID(FuncTy)); // Add OpFunctionParameters. int i = 0; @@ -326,9 +328,9 @@ MRI->setRegClass(VRegs[i][0], &SPIRV::IDRegClass); MIRBuilder.buildInstr(SPIRV::OpFunctionParameter) .addDef(VRegs[i][0]) - .addUse(GR->getSPIRVTypeID(ArgTypeVRegs[i])); + .addUse(GOR->getSPIRVTypeID(ArgTypeVRegs[i])); if (F.isDeclaration()) - GR->add(&Arg, &MIRBuilder.getMF(), VRegs[i][0]); + GOR->add(&Arg, &MIRBuilder.getMF(), VRegs[i][0]); i++; } // Name the function. 
@@ -359,7 +361,7 @@ if (Info.OrigRet.Regs.size() > 1) return false; MachineFunction &MF = MIRBuilder.getMF(); - GR->setCurrentFunc(MF); + GOR->setCurrentFunc(MF); FunctionType *FTy = nullptr; const Function *CF = nullptr; @@ -390,16 +392,16 @@ for (auto Arg : Info.OrigArgs) { assert(Arg.Regs.size() == 1 && "Call arg has multiple VRegs"); ArgVRegs.push_back(Arg.Regs[0]); - SPIRVType *SPIRVTy = GR->getOrCreateSPIRVType(Arg.Ty, MIRBuilder); - GR->assignSPIRVTypeToVReg(SPIRVTy, Arg.Regs[0], MIRBuilder.getMF()); + SPIRVType *SPIRVTy = GOR->getOrCreateSPIRVType(Arg.Ty, MIRBuilder); + GOR->assignSPIRVTypeToVReg(SPIRVTy, Arg.Regs[0], MIRBuilder.getMF()); } if (auto Res = SPIRV::lowerBuiltin( DemangledName, SPIRV::InstructionSet::OpenCL_std, MIRBuilder, - ResVReg, OrigRetTy, ArgVRegs, GR)) + ResVReg, OrigRetTy, ArgVRegs, GOR)) return *Res; } if (CF && CF->isDeclaration() && - !GR->find(CF, &MIRBuilder.getMF()).isValid()) { + !GOR->find(CF, &MIRBuilder.getMF()).isValid()) { // Emit the type info and forward function declaration to the first MBB // to ensure VReg definition dependencies are valid across all MBBs. MachineIRBuilder FirstBlockBuilder; @@ -425,12 +427,12 @@ if (!ResVReg.isValid()) ResVReg = MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass); SPIRVType *RetType = - GR->assignTypeToVReg(FTy->getReturnType(), ResVReg, MIRBuilder); + GOR->assignTypeToVReg(FTy->getReturnType(), ResVReg, MIRBuilder); // Emit the OpFunctionCall and its args. auto MIB = MIRBuilder.buildInstr(SPIRV::OpFunctionCall) .addDef(ResVReg) - .addUse(GR->getSPIRVTypeID(RetType)) + .addUse(GOR->getSPIRVTypeID(RetType)) .add(Info.Callee); for (const auto &Arg : Info.OrigArgs) { Index: llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp =================================================================== --- llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -12,8 +12,8 @@ // //===----------------------------------------------------------------------===// +#include "Registries/SPIRVGlobalObjectRegistry.h" #include "SPIRV.h" -#include "SPIRVGlobalRegistry.h" #include "SPIRVInstrInfo.h" #include "SPIRVRegisterBankInfo.h" #include "SPIRVRegisterInfo.h" @@ -47,7 +47,7 @@ const SPIRVInstrInfo &TII; const SPIRVRegisterInfo &TRI; const RegisterBankInfo &RBI; - SPIRVGlobalRegistry &GR; + SPIRVGlobalObjectRegistry &GOR; MachineRegisterInfo *MRI; public: @@ -190,7 +190,8 @@ const SPIRVSubtarget &ST, const RegisterBankInfo &RBI) : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()), - TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()), + TRI(*ST.getRegisterInfo()), RBI(RBI), + GOR(*ST.getSPIRVGlobalObjectRegistry()), #define GET_GLOBALISEL_PREDICATES_INIT #include "SPIRVGenGlobalISel.inc" #undef GET_GLOBALISEL_PREDICATES_INIT @@ -205,7 +206,7 @@ ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { MRI = &MF.getRegInfo(); - GR.setCurrentFunc(MF); + GOR.setCurrentFunc(MF); InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI); } @@ -248,7 +249,7 @@ // from parent occurs here. Instr-specific selection happens in spvSelect(). bool HasDefs = I.getNumDefs() > 0; Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0); - SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr; + SPIRVType *ResType = HasDefs ? 
GOR.getSPIRVTypeForVReg(ResVReg) : nullptr; assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE); if (spvSelect(ResVReg, ResType, I)) { if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs). @@ -286,7 +287,7 @@ MachineBasicBlock &BB = *I.getParent(); auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(1).getReg()) .addUse(I.getOperand(2).getReg()); for (auto V : I.getOperand(3).getShuffleMask()) @@ -442,11 +443,12 @@ (*II).getOpcode() == TargetOpcode::COPY || (*II).getOpcode() == SPIRV::OpVariable) && isImm(I.getOperand(2), MRI)); - Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I); + Register Idx = + buildZerosVal(GOR.getOrCreateSPIRVIntegerType(32, I, TII), I); MachineBasicBlock &BB = *I.getParent(); auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addImm(static_cast( SPIRV::Opcode::InBoundsPtrAccessChain)) .addUse(GV) @@ -516,7 +518,7 @@ MachineBasicBlock &BB = *I.getParent(); auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addImm(static_cast(Set)) .addImm(Opcode); const unsigned NumOps = I.getNumOperands(); @@ -535,7 +537,7 @@ unsigned Opcode) const { return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(SrcReg) .constrainAllUses(TII, TRI, RBI); } @@ -595,7 +597,7 @@ Register Ptr = I.getOperand(1 + OpOffset).getReg(); auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(Ptr); if (!I.getNumMemOperands()) { assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS); @@ -632,27 +634,27 @@ assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI); unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI); - SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII); - SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII); - Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII); - SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType( + SPIRVType *ValTy = GOR.getOrCreateSPIRVIntegerType(8, I, TII); + SPIRVType *ArrTy = GOR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII); + Register Const = GOR.getOrCreateConsIntArray(Val, I, ArrTy, TII); + SPIRVType *VarTy = GOR.getOrCreateSPIRVPointerType( ArrTy, I, TII, SPIRV::StorageClass::UniformConstant); // TODO: check if we have such GV, add init, use buildGlobalVariable. 
Type *LLVMArrTy = ArrayType::get( - IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num); + IntegerType::get(GOR.CurMF->getFunction().getContext(), 8), Num); GlobalVariable *GV = new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage); Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32)); - GR.add(GV, GR.CurMF, VarReg); + GOR.add(GV, GOR.CurMF, VarReg); buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {}); BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) .addDef(VarReg) - .addUse(GR.getSPIRVTypeID(VarTy)) + .addUse(GOR.getSPIRVTypeID(VarTy)) .addImm(SPIRV::StorageClass::UniformConstant) .addUse(Const) .constrainAllUses(TII, TRI, RBI); - SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType( + SPIRVType *SourceTy = GOR.getOrCreateSPIRVPointerType( ValTy, I, TII, SPIRV::StorageClass::UniformConstant); SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32)); selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast); @@ -682,14 +684,14 @@ Register Ptr = I.getOperand(1).getReg(); // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll // auto ScSem = - // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)); + // getMemSemanticsForStorageClass(GOR.getPointerStorageClass(Ptr)); AtomicOrdering AO = MemOp->getSuccessOrdering(); uint32_t MemSem = static_cast(getMemSemantics(AO)); Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I); return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(Ptr) .addUse(ScopeReg) .addUse(MemSemReg) @@ -725,7 +727,7 @@ ScopeReg = buildI32Constant(Scope, I); unsigned ScSem = static_cast( - getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr))); + getMemSemanticsForStorageClass(GOR.getPointerStorageClass(Ptr))); AtomicOrdering AO = MemOp->getSuccessOrdering(); unsigned MemSemEq = static_cast(getMemSemantics(AO)) | ScSem; MemSemEqReg = buildI32Constant(MemSemEq, I); @@ -741,13 +743,13 @@ Register Cmp = I.getOperand(3).getReg(); Register Val = I.getOperand(4).getReg(); - SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val); + SPIRVType *SpvValTy = GOR.getSPIRVTypeForVReg(Val); Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass); const DebugLoc &DL = I.getDebugLoc(); bool Result = BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange)) .addDef(ACmpRes) - .addUse(GR.getSPIRVTypeID(SpvValTy)) + .addUse(GOR.getSPIRVTypeID(SpvValTy)) .addUse(Ptr) .addUse(ScopeReg) .addUse(MemSemEqReg) @@ -756,24 +758,24 @@ .addUse(Cmp) .constrainAllUses(TII, TRI, RBI); Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); - SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII); + SPIRVType *BoolTy = GOR.getOrCreateSPIRVBoolType(I, TII); Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual)) .addDef(CmpSuccReg) - .addUse(GR.getSPIRVTypeID(BoolTy)) + .addUse(GOR.getSPIRVTypeID(BoolTy)) .addUse(ACmpRes) .addUse(Cmp) .constrainAllUses(TII, TRI, RBI); Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert)) .addDef(TmpReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(ACmpRes) - .addUse(GR.getOrCreateUndef(I, ResType, TII)) + .addUse(GOR.getOrCreateUndef(I, ResType, TII)) .addImm(0) .constrainAllUses(TII, TRI, RBI); Result |= BuildMI(*I.getParent(), I, DL, 
TII.get(SPIRV::OpCompositeInsert)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(CmpSuccReg) .addUse(TmpReg) .addImm(1) @@ -809,22 +811,22 @@ isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) { Register NewReg = I.getOperand(1).getReg(); MachineBasicBlock &BB = *I.getParent(); - SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII); - ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII, - SPIRV::StorageClass::Generic); + SPIRVType *SpvBaseTy = GOR.getOrCreateSPIRVIntegerType(8, I, TII); + ResType = GOR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII, + SPIRV::StorageClass::Generic); bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addImm(static_cast(SPIRV::Opcode::PtrCastToGeneric)) .addUse(NewReg) .constrainAllUses(TII, TRI, RBI); return Result; } Register SrcPtr = I.getOperand(1).getReg(); - SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr); - SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr); - SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg); + SPIRVType *SrcPtrTy = GOR.getSPIRVTypeForVReg(SrcPtr); + SPIRV::StorageClass::StorageClass SrcSC = GOR.getPointerStorageClass(SrcPtr); + SPIRV::StorageClass::StorageClass DstSC = GOR.getPointerStorageClass(ResVReg); // Casting from an eligable pointer to Generic. if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)) @@ -835,18 +837,18 @@ // Casting between 2 eligable pointers using Generic as an intermediary. if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) { Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass); - SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType( + SPIRVType *GenericPtrTy = GOR.getOrCreateSPIRVPointerType( SrcPtrTy, I, TII, SPIRV::StorageClass::Generic); MachineBasicBlock &BB = *I.getParent(); const DebugLoc &DL = I.getDebugLoc(); bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric)) .addDef(Tmp) - .addUse(GR.getSPIRVTypeID(GenericPtrTy)) + .addUse(GOR.getSPIRVTypeID(GenericPtrTy)) .addUse(SrcPtr) .constrainAllUses(TII, TRI, RBI); return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(Tmp) .constrainAllUses(TII, TRI, RBI); } @@ -949,7 +951,7 @@ MachineBasicBlock &BB = *I.getParent(); return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(1).getReg()) .constrainAllUses(TII, TRI, RBI); } @@ -977,7 +979,7 @@ auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpConstantComposite)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)); + .addUse(GOR.getSPIRVTypeID(ResType)); for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i) MIB.addUse(I.getOperand(i).getReg()); return MIB.constrainAllUses(TII, TRI, RBI); @@ -989,12 +991,12 @@ MachineInstr &I) const { Register Cmp0 = I.getOperand(2).getReg(); Register Cmp1 = I.getOperand(3).getReg(); - assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() == - GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() && + assert(GOR.getSPIRVTypeForVReg(Cmp0)->getOpcode() == + GOR.getSPIRVTypeForVReg(Cmp1)->getOpcode() && "CMP operands should have the same type"); return BuildMI(*I.getParent(), I, 
I.getDebugLoc(), TII.get(CmpOpc)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(Cmp0) .addUse(Cmp1) .constrainAllUses(TII, TRI, RBI); @@ -1007,9 +1009,9 @@ unsigned CmpOpc; Register CmpOperand = I.getOperand(2).getReg(); - if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer)) + if (GOR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer)) CmpOpc = getPtrCmpOpcode(Pred); - else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool)) + else if (GOR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool)) CmpOpc = getBoolCmpOpcode(Pred); else CmpOpc = getICmpOpcode(Pred); @@ -1036,25 +1038,25 @@ Register SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I, const SPIRVType *ResType) const { - Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32); + Type *LLVMTy = IntegerType::get(GOR.CurMF->getFunction().getContext(), 32); const SPIRVType *SpvI32Ty = - ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII); + ResType ? ResType : GOR.getOrCreateSPIRVIntegerType(32, I, TII); // Find a constant in DT or build a new one. auto ConstInt = ConstantInt::get(LLVMTy, Val); - Register NewReg = GR.find(ConstInt, GR.CurMF); + Register NewReg = GOR.find(ConstInt, GOR.CurMF); if (!NewReg.isValid()) { NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32)); - GR.add(ConstInt, GR.CurMF, NewReg); + GOR.add(ConstInt, GOR.CurMF, NewReg); MachineInstr *MI; MachineBasicBlock &BB = *I.getParent(); if (Val == 0) { MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull)) .addDef(NewReg) - .addUse(GR.getSPIRVTypeID(SpvI32Ty)); + .addUse(GOR.getSPIRVTypeID(SpvI32Ty)); } else { MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI)) .addDef(NewReg) - .addUse(GR.getSPIRVTypeID(SpvI32Ty)) + .addUse(GOR.getSPIRVTypeID(SpvI32Ty)) .addImm(APInt(32, Val).getZExtValue()); } constrainSelectedInstRegOperands(*MI, TII, TRI, RBI); @@ -1072,19 +1074,19 @@ Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const { if (ResType->getOpcode() == SPIRV::OpTypeVector) - return GR.getOrCreateConsIntVector(0, I, ResType, TII); - return GR.getOrCreateConstInt(0, I, ResType, TII); + return GOR.getOrCreateConsIntVector(0, I, ResType, TII); + return GOR.getOrCreateConstInt(0, I, ResType, TII); } Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes, const SPIRVType *ResType, MachineInstr &I) const { - unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType); + unsigned BitWidth = GOR.getScalarOrVectorBitWidth(ResType); APInt One = AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0); if (ResType->getOpcode() == SPIRV::OpTypeVector) - return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII); - return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII); + return GOR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII); + return GOR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII); } bool SPIRVInstructionSelector::selectSelect(Register ResVReg, @@ -1095,12 +1097,12 @@ Register ZeroReg = buildZerosVal(ResType, I); Register OneReg = buildOnesVal(IsSigned, ResType, I); bool IsScalarBool = - GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool); + GOR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool); unsigned Opcode = IsScalarBool ? 
SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond; return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(1).getReg()) .addUse(OneReg) .addUse(ZeroReg) @@ -1114,12 +1116,12 @@ Register SrcReg = I.getOperand(1).getReg(); // We can convert bool value directly to float type without OpConvert*ToF, // however the translator generates OpSelect+OpConvert*ToF, so we do the same. - if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) { - unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType); - SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII); + if (GOR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) { + unsigned BitWidth = GOR.getScalarOrVectorBitWidth(ResType); + SPIRVType *TmpType = GOR.getOrCreateSPIRVIntegerType(BitWidth, I, TII); if (ResType->getOpcode() == SPIRV::OpTypeVector) { const unsigned NumElts = ResType->getOperand(2).getImm(); - TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII); + TmpType = GOR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII); } SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass); selectSelect(SrcReg, TmpType, I, false); @@ -1130,7 +1132,7 @@ bool SPIRVInstructionSelector::selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, bool IsSigned) const { - if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) + if (GOR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) return selectSelect(ResVReg, ResType, I, IsSigned); unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert; return selectUnOp(ResVReg, ResType, I, Opcode); @@ -1150,13 +1152,13 @@ MachineBasicBlock &BB = *I.getParent(); BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) .addDef(BitIntReg) - .addUse(GR.getSPIRVTypeID(IntTy)) + .addUse(GOR.getSPIRVTypeID(IntTy)) .addUse(IntReg) .addUse(One) .constrainAllUses(TII, TRI, RBI); return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(BoolTy)) + .addUse(GOR.getSPIRVTypeID(BoolTy)) .addUse(BitIntReg) .addUse(Zero) .constrainAllUses(TII, TRI, RBI); @@ -1165,12 +1167,12 @@ bool SPIRVInstructionSelector::selectTrunc(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const { - if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) { + if (GOR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) { Register IntReg = I.getOperand(1).getReg(); - const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg); + const SPIRVType *ArgType = GOR.getSPIRVTypeForVReg(IntReg); return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType); } - bool IsSigned = GR.isScalarOrVectorSigned(ResType); + bool IsSigned = GOR.isScalarOrVectorSigned(ResType); unsigned Opcode = IsSigned ? 
SPIRV::OpSConvert : SPIRV::OpUConvert; return selectUnOp(ResVReg, ResType, I, Opcode); } @@ -1186,11 +1188,11 @@ Imm.isZero()) return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .constrainAllUses(TII, TRI, RBI); if (TyOpcode == SPIRV::OpTypeInt) { assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!"); - Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII); + Register Reg = GOR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII); if (Reg == ResVReg) return true; return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY)) @@ -1200,7 +1202,7 @@ } auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)); + .addUse(GOR.getSPIRVTypeID(ResType)); // <=32-bit integers should be caught by the sdag pattern. assert(Imm.getBitWidth() > 32); addNumImm(Imm, MIB); @@ -1212,7 +1214,7 @@ MachineInstr &I) const { return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .constrainAllUses(TII, TRI, RBI); } @@ -1239,7 +1241,7 @@ MachineBasicBlock &BB = *I.getParent(); auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) // object to insert .addUse(I.getOperand(3).getReg()) // composite to insert into @@ -1255,7 +1257,7 @@ MachineBasicBlock &BB = *I.getParent(); auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(2).getReg()); for (unsigned i = 3; i < I.getNumOperands(); i++) MIB.addImm(foldImm(I.getOperand(i), MRI)); @@ -1270,7 +1272,7 @@ MachineBasicBlock &BB = *I.getParent(); return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(2).getReg()) .addUse(I.getOperand(3).getReg()) .addUse(I.getOperand(4).getReg()) @@ -1285,7 +1287,7 @@ MachineBasicBlock &BB = *I.getParent(); return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(2).getReg()) .addUse(I.getOperand(3).getReg()) .constrainAllUses(TII, TRI, RBI); @@ -1301,7 +1303,7 @@ : SPIRV::OpPtrAccessChain; auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(GOR.getSPIRVTypeID(ResType)) // Object to get a pointer to. .addUse(I.getOperand(3).getReg()); // Adding indices. @@ -1341,7 +1343,7 @@ case Intrinsic::spv_undef: { auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef)) .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)); + .addUse(GOR.getSPIRVTypeID(ResType)); return MIB.constrainAllUses(TII, TRI, RBI); } case Intrinsic::spv_const_composite: { @@ -1351,7 +1353,7 @@ IsNull ? 
     auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                    .addDef(ResVReg)
-                   .addUse(GR.getSPIRVTypeID(ResType));
+                   .addUse(GOR.getSPIRVTypeID(ResType));
     // skip type MD node we already used when generated assign.type for this
     if (!IsNull) {
       for (unsigned i = I.getNumExplicitDefs() + 1;
@@ -1402,7 +1404,7 @@
                                            MachineInstr &I) const {
   return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
       .addDef(ResVReg)
-      .addUse(GR.getSPIRVTypeID(ResType))
+      .addUse(GOR.getSPIRVTypeID(ResType))
       .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
       .constrainAllUses(TII, TRI, RBI);
 }
@@ -1458,7 +1460,7 @@
                                        MachineInstr &I) const {
   auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
-                .addUse(GR.getSPIRVTypeID(ResType));
+                .addUse(GOR.getSPIRVTypeID(ResType));
   const unsigned NumOps = I.getNumOperands();
   for (unsigned i = 1; i < NumOps; i += 2) {
     MIB.addUse(I.getOperand(i + 0).getReg());
@@ -1472,7 +1474,7 @@
   // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
   MachineIRBuilder MIRBuilder(I);
   const GlobalValue *GV = I.getOperand(1).getGlobal();
-  SPIRVType *ResType = GR.getOrCreateSPIRVType(
+  SPIRVType *ResType = GOR.getOrCreateSPIRVType(
       GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
   std::string GlobalIdent = GV->getGlobalIdentifier();
@@ -1483,15 +1485,15 @@
   if (isa<Function>(GV)) {
     const Constant *ConstVal = GV;
     MachineBasicBlock &BB = *I.getParent();
-    Register NewReg = GR.find(ConstVal, GR.CurMF);
+    Register NewReg = GOR.find(ConstVal, GOR.CurMF);
     if (!NewReg.isValid()) {
-      SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
-      ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
+      SPIRVType *SpvBaseTy = GOR.getOrCreateSPIRVIntegerType(8, I, TII);
+      ResType = GOR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
       Register NewReg = ResVReg;
-      GR.add(ConstVal, GR.CurMF, NewReg);
+      GOR.add(ConstVal, GOR.CurMF, NewReg);
       return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
           .addDef(NewReg)
-          .addUse(GR.getSPIRVTypeID(ResType))
+          .addUse(GOR.getSPIRVTypeID(ResType))
           .constrainAllUses(TII, TRI, RBI);
     }
     assert(NewReg != ResVReg);
@@ -1520,9 +1522,9 @@
                            ? SPIRV::LinkageType::Import
                            : SPIRV::LinkageType::Export;
-  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
-                                        Storage, Init, GlobalVar->isConstant(),
-                                        HasLnkTy, LnkType, MIRBuilder, true);
+  Register Reg = GOR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
+                                         Storage, Init, GlobalVar->isConstant(),
+                                         HasLnkTy, LnkType, MIRBuilder, true);
   return Reg.isValid();
 }
Index: llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h
+++ llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.h
@@ -13,7 +13,7 @@
 #ifndef LLVM_LIB_TARGET_SPIRV_SPIRVMACHINELEGALIZER_H
 #define LLVM_LIB_TARGET_SPIRV_SPIRVMACHINELEGALIZER_H
-#include "SPIRVGlobalRegistry.h"
+#include "Registries/SPIRVGlobalObjectRegistry.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
 bool isTypeFoldingSupported(unsigned Opcode);
@@ -26,7 +26,7 @@
 // This class provides the information for legalizing SPIR-V instructions.
 class SPIRVLegalizerInfo : public LegalizerInfo {
   const SPIRVSubtarget *ST;
-  SPIRVGlobalRegistry *GR;
+  SPIRVGlobalObjectRegistry *GOR;
 public:
   bool legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI) const override;
Index: llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -11,8 +11,8 @@
 //===----------------------------------------------------------------------===//
 #include "SPIRVLegalizerInfo.h"
+#include "Registries/SPIRVGlobalObjectRegistry.h"
 #include "SPIRV.h"
-#include "SPIRVGlobalRegistry.h"
 #include "SPIRVSubtarget.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -58,7 +58,7 @@
   using namespace TargetOpcode;
   this->ST = &ST;
-  GR = ST.getSPIRVGlobalRegistry();
+  GOR = ST.getSPIRVGlobalObjectRegistry();
   const LLT s1 = LLT::scalar(1);
   const LLT s8 = LLT::scalar(8);
@@ -277,9 +277,9 @@
 static Register convertPtrToInt(Register Reg, LLT ConvTy, SPIRVType *SpirvType,
                                 LegalizerHelper &Helper,
                                 MachineRegisterInfo &MRI,
-                                SPIRVGlobalRegistry *GR) {
+                                SPIRVGlobalObjectRegistry *GOR) {
   Register ConvReg = MRI.createGenericVirtualRegister(ConvTy);
-  GR->assignSPIRVTypeToVReg(SpirvType, ConvReg, Helper.MIRBuilder.getMF());
+  GOR->assignSPIRVTypeToVReg(SpirvType, ConvReg, Helper.MIRBuilder.getMF());
   Helper.MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
       .addDef(ConvReg)
       .addUse(Reg);
@@ -292,7 +292,7 @@
   MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
   if (!isTypeFoldingSupported(Opc)) {
     assert(Opc == TargetOpcode::G_ICMP);
-    assert(GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg()));
+    assert(GOR->getSPIRVTypeForVReg(MI.getOperand(0).getReg()));
     auto &Op0 = MI.getOperand(2);
     auto &Op1 = MI.getOperand(3);
     Register Reg0 = Op0.getReg();
@@ -305,9 +305,9 @@
     LLT ConvT = LLT::scalar(ST->getPointerSize());
     Type *LLVMTy = IntegerType::get(MI.getMF()->getFunction().getContext(),
                                     ST->getPointerSize());
-    SPIRVType *SpirvTy = GR->getOrCreateSPIRVType(LLVMTy, Helper.MIRBuilder);
-    Op0.setReg(convertPtrToInt(Reg0, ConvT, SpirvTy, Helper, MRI, GR));
-    Op1.setReg(convertPtrToInt(Reg1, ConvT, SpirvTy, Helper, MRI, GR));
+    SPIRVType *SpirvTy = GOR->getOrCreateSPIRVType(LLVMTy, Helper.MIRBuilder);
+    Op0.setReg(convertPtrToInt(Reg0, ConvT, SpirvTy, Helper, MRI, GOR));
+    Op1.setReg(convertPtrToInt(Reg1, ConvT, SpirvTy, Helper, MRI, GOR));
   }
   return true;
 }
Index: llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h
+++ llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h
@@ -15,7 +15,7 @@
 #define LLVM_LIB_TARGET_SPIRV_SPIRVMODULEANALYSIS_H
 #include "MCTargetDesc/SPIRVBaseInfo.h"
-#include "SPIRVGlobalRegistry.h"
+#include "Registries/SPIRVGlobalObjectRegistry.h"
 #include "SPIRVUtils.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
@@ -213,7 +213,7 @@
   void numberRegistersGlobally(const Module &M);
   const SPIRVSubtarget *ST;
-  SPIRVGlobalRegistry *GR;
+  SPIRVGlobalObjectRegistry *GOR;
   const SPIRVInstrInfo *TII;
   MachineModuleInfo *MMI;
 };
Index: llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -219,7 +219,7 @@
 void SPIRVModuleAnalysis::processDefInstrs(const Module &M) {
   std::vector<SPIRV::DTSortableEntry *> DepsGraph;
-  GR->buildDepsGraph(DepsGraph, SPVDumpDeps ? MMI : nullptr);
+  GOR->buildDepsGraph(DepsGraph, SPVDumpDeps ? MMI : nullptr);
   collectGlobalEntities(
       DepsGraph, SPIRV::MB_TypeConstVars,
@@ -968,7 +968,7 @@
   SPIRVTargetMachine &TM =
      getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
   ST = TM.getSubtargetImpl();
-  GR = ST->getSPIRVGlobalRegistry();
+  GOR = ST->getSPIRVGlobalObjectRegistry();
   TII = ST->getInstrInfo();
   MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
Index: llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -8,7 +8,7 @@
 //
 // The pass prepares IR for legalization: it assigns SPIR-V types to registers
 // and removes intrinsics which holded these types during IR translation.
-// Also it processes constants and registers them in GR to avoid duplication.
+// Also it processes constants and registers them in GOR to avoid duplication.
 //
 //===----------------------------------------------------------------------===//
@@ -38,7 +38,8 @@
 };
 } // namespace
-static void addConstantsToTrack(MachineFunction &MF, SPIRVGlobalRegistry *GR) {
+static void addConstantsToTrack(MachineFunction &MF,
+                                SPIRVGlobalObjectRegistry *GOR) {
   MachineRegisterInfo &MRI = MF.getRegInfo();
   DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;
   SmallVector<MachineInstr *> ToErase, ToEraseComposites;
@@ -52,23 +53,23 @@
                              MI.getOperand(3).getMetadata()->getOperand(0))
                              ->getValue());
       if (auto *GV = dyn_cast<GlobalValue>(Const)) {
-        Register Reg = GR->find(GV, &MF);
+        Register Reg = GOR->find(GV, &MF);
         if (!Reg.isValid())
-          GR->add(GV, &MF, MI.getOperand(2).getReg());
+          GOR->add(GV, &MF, MI.getOperand(2).getReg());
         else
           RegsAlreadyAddedToDT[&MI] = Reg;
       } else {
-        Register Reg = GR->find(Const, &MF);
+        Register Reg = GOR->find(Const, &MF);
         if (!Reg.isValid()) {
           if (auto *ConstVec = dyn_cast<ConstantDataVector>(Const)) {
             auto *BuildVec = MRI.getVRegDef(MI.getOperand(2).getReg());
             assert(BuildVec &&
                    BuildVec->getOpcode() == TargetOpcode::G_BUILD_VECTOR);
             for (unsigned i = 0; i < ConstVec->getNumElements(); ++i)
-              GR->add(ConstVec->getElementAsConstant(i), &MF,
-                      BuildVec->getOperand(1 + i).getReg());
+              GOR->add(ConstVec->getElementAsConstant(i), &MF,
+                       BuildVec->getOperand(1 + i).getReg());
           }
-          GR->add(Const, &MF, MI.getOperand(2).getReg());
+          GOR->add(Const, &MF, MI.getOperand(2).getReg());
         } else {
           RegsAlreadyAddedToDT[&MI] = Reg;
           // This MI is unused and will be removed. If the MI uses
@@ -120,7 +121,7 @@
   MI->eraseFromParent();
 }
-static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
+static void insertBitcasts(MachineFunction &MF, SPIRVGlobalObjectRegistry *GOR,
                            MachineIRBuilder MIB) {
   SmallVector<MachineInstr *> ToErase;
   for (MachineBasicBlock &MBB : MF) {
@@ -145,26 +146,27 @@
 //
 // Set SPIRVType for GV, propagate it from GV to other instructions,
 // also set register classes.
-static SPIRVType *propagateSPIRVType(MachineInstr *MI, SPIRVGlobalRegistry *GR,
+static SPIRVType *propagateSPIRVType(MachineInstr *MI,
+                                     SPIRVGlobalObjectRegistry *GOR,
                                      MachineRegisterInfo &MRI,
                                      MachineIRBuilder &MIB) {
   SPIRVType *SpirvTy = nullptr;
   assert(MI && "Machine instr is expected");
   if (MI->getOperand(0).isReg()) {
     Register Reg = MI->getOperand(0).getReg();
-    SpirvTy = GR->getSPIRVTypeForVReg(Reg);
+    SpirvTy = GOR->getSPIRVTypeForVReg(Reg);
     if (!SpirvTy) {
       switch (MI->getOpcode()) {
       case TargetOpcode::G_CONSTANT: {
         MIB.setInsertPt(*MI->getParent(), MI);
         Type *Ty = MI->getOperand(1).getCImm()->getType();
-        SpirvTy = GR->getOrCreateSPIRVType(Ty, MIB);
+        SpirvTy = GOR->getOrCreateSPIRVType(Ty, MIB);
         break;
       }
       case TargetOpcode::G_GLOBAL_VALUE: {
         MIB.setInsertPt(*MI->getParent(), MI);
         Type *Ty = MI->getOperand(1).getGlobal()->getType();
-        SpirvTy = GR->getOrCreateSPIRVType(Ty, MIB);
+        SpirvTy = GOR->getOrCreateSPIRVType(Ty, MIB);
         break;
       }
       case TargetOpcode::G_TRUNC:
@@ -174,14 +176,14 @@
         MachineOperand &Op = MI->getOperand(1);
         MachineInstr *Def = Op.isReg() ? MRI.getVRegDef(Op.getReg()) : nullptr;
         if (Def)
-          SpirvTy = propagateSPIRVType(Def, GR, MRI, MIB);
+          SpirvTy = propagateSPIRVType(Def, GOR, MRI, MIB);
         break;
       }
       default:
         break;
       }
       if (SpirvTy)
-        GR->assignSPIRVTypeToVReg(SpirvTy, Reg, MIB.getMF());
+        GOR->assignSPIRVTypeToVReg(SpirvTy, Reg, MIB.getMF());
       if (!MRI.getRegClassOrNull(Reg))
         MRI.setRegClass(Reg, &SPIRV::IDRegClass);
     }
@@ -196,8 +198,8 @@
 // TODO: maybe move to SPIRVUtils.
 namespace llvm {
 Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
-                           SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
-                           MachineRegisterInfo &MRI) {
+                           SPIRVGlobalObjectRegistry *GOR,
+                           MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
   MachineInstr *Def = MRI.getVRegDef(Reg);
   assert((Ty || SpirvTy) && "Either LLVM or SPIRV type is expected.");
   MIB.setInsertPt(*Def->getParent(),
@@ -210,25 +212,26 @@
     MRI.setRegClass(NewReg, &SPIRV::IDRegClass);
     MRI.setRegClass(Reg, &SPIRV::IDRegClass);
   }
-  SpirvTy = SpirvTy ? SpirvTy : GR->getOrCreateSPIRVType(Ty, MIB);
-  GR->assignSPIRVTypeToVReg(SpirvTy, Reg, MIB.getMF());
+  SpirvTy = SpirvTy ? SpirvTy : GOR->getOrCreateSPIRVType(Ty, MIB);
+  GOR->assignSPIRVTypeToVReg(SpirvTy, Reg, MIB.getMF());
   // This is to make it convenient for Legalizer to get the SPIRVType
   // when processing the actual MI (i.e. not pseudo one).
-  GR->assignSPIRVTypeToVReg(SpirvTy, NewReg, MIB.getMF());
+  GOR->assignSPIRVTypeToVReg(SpirvTy, NewReg, MIB.getMF());
   // Copy MIFlags from Def to ASSIGN_TYPE instruction. It's required to keep
   // the flags after instruction selection.
   const uint16_t Flags = Def->getFlags();
   MIB.buildInstr(SPIRV::ASSIGN_TYPE)
       .addDef(Reg)
       .addUse(NewReg)
-      .addUse(GR->getSPIRVTypeID(SpirvTy))
+      .addUse(GOR->getSPIRVTypeID(SpirvTy))
       .setMIFlags(Flags);
   Def->getOperand(0).setReg(NewReg);
   return NewReg;
 }
 } // namespace llvm
-static void generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
+static void generateAssignInstrs(MachineFunction &MF,
+                                 SPIRVGlobalObjectRegistry *GOR,
                                  MachineIRBuilder MIB) {
   MachineRegisterInfo &MRI = MF.getRegInfo();
   SmallVector<MachineInstr *> ToErase;
@@ -249,7 +252,7 @@
     assert(Def && "Expecting an instruction that defines the register");
     // G_GLOBAL_VALUE already has type info.
     if (Def->getOpcode() != TargetOpcode::G_GLOBAL_VALUE)
-      insertAssignInstr(Reg, Ty, nullptr, GR, MIB, MF.getRegInfo());
+      insertAssignInstr(Reg, Ty, nullptr, GOR, MIB, MF.getRegInfo());
     ToErase.push_back(&MI);
   } else if (MI.getOpcode() == TargetOpcode::G_CONSTANT ||
              MI.getOpcode() == TargetOpcode::G_FCONSTANT ||
@@ -287,12 +290,12 @@
           MI.getNumExplicitOperands() - MI.getNumExplicitDefs();
       Ty = VectorType::get(ElemTy, NumElts, false);
     }
-    insertAssignInstr(Reg, Ty, nullptr, GR, MIB, MRI);
+    insertAssignInstr(Reg, Ty, nullptr, GOR, MIB, MRI);
   } else if (MI.getOpcode() == TargetOpcode::G_TRUNC ||
              MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
             MI.getOpcode() == TargetOpcode::COPY ||
             MI.getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
-    propagateSPIRVType(&MI, GR, MRI, MIB);
+    propagateSPIRVType(&MI, GOR, MRI, MIB);
   }
   if (MII == Begin)
@@ -307,14 +310,14 @@
 static std::pair<Register, unsigned>
 createNewIdReg(Register ValReg, unsigned Opcode, MachineRegisterInfo &MRI,
-               const SPIRVGlobalRegistry &GR) {
+               const SPIRVGlobalObjectRegistry &GOR) {
   LLT NewT = LLT::scalar(32);
-  SPIRVType *SpvType = GR.getSPIRVTypeForVReg(ValReg);
+  SPIRVType *SpvType = GOR.getSPIRVTypeForVReg(ValReg);
   assert(SpvType && "VReg is expected to have SPIRV type");
   bool IsFloat = SpvType->getOpcode() == SPIRV::OpTypeFloat;
   bool IsVectorFloat =
       SpvType->getOpcode() == SPIRV::OpTypeVector &&
-      GR.getSPIRVTypeForVReg(SpvType->getOperand(1).getReg())->getOpcode() ==
+      GOR.getSPIRVTypeForVReg(SpvType->getOperand(1).getReg())->getOpcode() ==
           SPIRV::OpTypeFloat;
   IsFloat |= IsVectorFloat;
   auto GetIdOp = IsFloat ? SPIRV::GET_fID : SPIRV::GET_ID;
@@ -334,12 +337,13 @@
 }
 static void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
-                         MachineRegisterInfo &MRI, SPIRVGlobalRegistry *GR) {
+                         MachineRegisterInfo &MRI,
+                         SPIRVGlobalObjectRegistry *GOR) {
   unsigned Opc = MI.getOpcode();
   assert(MI.getNumDefs() > 0 && MRI.hasOneUse(MI.getOperand(0).getReg()));
   MachineInstr &AssignTypeInst =
       *(MRI.use_instr_begin(MI.getOperand(0).getReg()));
-  auto NewReg = createNewIdReg(MI.getOperand(0).getReg(), Opc, MRI, *GR).first;
+  auto NewReg = createNewIdReg(MI.getOperand(0).getReg(), Opc, MRI, *GOR).first;
   AssignTypeInst.getOperand(1).setReg(NewReg);
   MI.getOperand(0).setReg(NewReg);
   MIB.setInsertPt(*MI.getParent(),
@@ -348,7 +352,7 @@
   for (auto &Op : MI.operands()) {
     if (!Op.isReg() || Op.isDef())
       continue;
-    auto IdOpInfo = createNewIdReg(Op.getReg(), Opc, MRI, *GR);
+    auto IdOpInfo = createNewIdReg(Op.getReg(), Opc, MRI, *GOR);
     MIB.buildInstr(IdOpInfo.second).addDef(IdOpInfo.first).addUse(Op.getReg());
     Op.setReg(IdOpInfo.first);
   }
@@ -358,13 +362,13 @@
 extern bool isTypeFoldingSupported(unsigned Opcode);
 static void processInstrsWithTypeFolding(MachineFunction &MF,
-                                         SPIRVGlobalRegistry *GR,
+                                         SPIRVGlobalObjectRegistry *GOR,
                                          MachineIRBuilder MIB) {
   MachineRegisterInfo &MRI = MF.getRegInfo();
   for (MachineBasicBlock &MBB : MF) {
     for (MachineInstr &MI : MBB) {
       if (isTypeFoldingSupported(MI.getOpcode()))
-        processInstr(MI, MIB, MRI, GR);
+        processInstr(MI, MIB, MRI, GOR);
     }
   }
   for (MachineBasicBlock &MBB : MF) {
@@ -393,7 +397,7 @@
   }
 }
-static void processSwitches(MachineFunction &MF, SPIRVGlobalRegistry *GR,
+static void processSwitches(MachineFunction &MF, SPIRVGlobalObjectRegistry *GOR,
                             MachineIRBuilder MIB) {
   // Before IRTranslator pass, calls to spv_switch intrinsic are inserted before
   // each switch instruction.
   // IRTranslator lowers switches to G_ICMP + G_BRCOND
@@ -577,15 +581,15 @@
 bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
   // Initialize the type registry.
   const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
-  SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
-  GR->setCurrentFunc(MF);
+  SPIRVGlobalObjectRegistry *GOR = ST.getSPIRVGlobalObjectRegistry();
+  GOR->setCurrentFunc(MF);
   MachineIRBuilder MIB(MF);
-  addConstantsToTrack(MF, GR);
+  addConstantsToTrack(MF, GOR);
   foldConstantsIntoIntrinsics(MF);
-  insertBitcasts(MF, GR, MIB);
-  generateAssignInstrs(MF, GR, MIB);
-  processSwitches(MF, GR, MIB);
-  processInstrsWithTypeFolding(MF, GR, MIB);
+  insertBitcasts(MF, GOR, MIB);
+  generateAssignInstrs(MF, GOR, MIB);
+  processSwitches(MF, GOR, MIB);
+  processInstrsWithTypeFolding(MF, GOR, MIB);
   return true;
 }
Index: llvm/lib/Target/SPIRV/SPIRVSubtarget.h
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVSubtarget.h
+++ llvm/lib/Target/SPIRV/SPIRVSubtarget.h
@@ -40,7 +40,7 @@
   SmallSet AvailableExtensions;
   SmallSet AvailableExtInstSets;
-  std::unique_ptr<SPIRVGlobalRegistry> GR;
+  std::unique_ptr<SPIRVGlobalObjectRegistry> GOR;
   SPIRVInstrInfo InstrInfo;
   SPIRVFrameLowering FrameLowering;
@@ -82,7 +82,9 @@
   bool canUseExtension(SPIRV::Extension::Extension E) const;
   bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const;
-  SPIRVGlobalRegistry *getSPIRVGlobalRegistry() const { return GR.get(); }
+  SPIRVGlobalObjectRegistry *getSPIRVGlobalObjectRegistry() const {
+    return GOR.get();
+  }
   const CallLowering *getCallLowering() const override {
     return CallLoweringInfo.get();
Index: llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
+++ llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
@@ -11,8 +11,8 @@
 //===----------------------------------------------------------------------===//
 #include "SPIRVSubtarget.h"
+#include "Registries/SPIRVGlobalObjectRegistry.h"
 #include "SPIRV.h"
-#include "SPIRVGlobalRegistry.h"
 #include "SPIRVLegalizerInfo.h"
 #include "SPIRVRegisterBankInfo.h"
 #include "SPIRVTargetMachine.h"
@@ -50,8 +50,8 @@
   initAvailableExtensions();
   initAvailableExtInstSets();
-  GR = std::make_unique<SPIRVGlobalRegistry>(PointerSize);
-  CallLoweringInfo = std::make_unique<SPIRVCallLowering>(TLInfo, GR.get());
+  GOR = std::make_unique<SPIRVGlobalObjectRegistry>(PointerSize);
+  CallLoweringInfo = std::make_unique<SPIRVCallLowering>(TLInfo, GOR.get());
   Legalizer = std::make_unique<SPIRVLegalizerInfo>(*this);
   RegBankInfo = std::make_unique<SPIRVRegisterBankInfo>();
   InstSelector.reset(
Index: llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
===================================================================
--- llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
+++ llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
@@ -13,7 +13,7 @@
 #include "SPIRVTargetMachine.h"
 #include "SPIRV.h"
 #include "SPIRVCallLowering.h"
-#include "SPIRVGlobalRegistry.h"
+#include "Registries/SPIRVGlobalObjectRegistry.h"
 #include "SPIRVLegalizerInfo.h"
 #include "SPIRVTargetObjectFile.h"
 #include "SPIRVTargetTransformInfo.h"
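Note (reviewer aid, not part of the patch): after the rename, client passes reach the registry through the new SPIRVSubtarget accessor but otherwise keep the same call pattern. Below is a minimal sketch of that pattern, built only from calls that appear in this diff (getSPIRVGlobalObjectRegistry, setCurrentFunc, getOrCreateSPIRVType, assignSPIRVTypeToVReg, getSPIRVTypeID); the helper name demoAssignSPIRVType is hypothetical and exists only for illustration.

// Illustrative sketch only, not part of the patch. The helper name
// demoAssignSPIRVType is hypothetical; every registry call it makes already
// appears in the diff above.
#include "Registries/SPIRVGlobalObjectRegistry.h"
#include "SPIRVSubtarget.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Type.h"

using namespace llvm;

static Register demoAssignSPIRVType(MachineFunction &MF, Register Reg,
                                    Type *Ty) {
  // Fetch the renamed registry from the subtarget (previously
  // getSPIRVGlobalRegistry).
  const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
  SPIRVGlobalObjectRegistry *GOR = ST.getSPIRVGlobalObjectRegistry();
  // Same setup as SPIRVPreLegalizer::runOnMachineFunction in this patch.
  GOR->setCurrentFunc(MF);

  // Convert the llvm::Type into an OpTypeXXX instruction (an existing one is
  // reused if it was already emitted) and remember it for the vreg.
  MachineIRBuilder MIB(MF);
  SPIRVType *SpvTy = GOR->getOrCreateSPIRVType(Ty, MIB);
  GOR->assignSPIRVTypeToVReg(SpvTy, Reg, MF);

  // The returned register holds the OpTypeXXX instruction and can be passed
  // to MachineInstrBuilder::addUse, as the instruction selector does above.
  return GOR->getSPIRVTypeID(SpvTy);
}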