diff --git a/llvm/lib/Target/DirectX/CMakeLists.txt b/llvm/lib/Target/DirectX/CMakeLists.txt --- a/llvm/lib/Target/DirectX/CMakeLists.txt +++ b/llvm/lib/Target/DirectX/CMakeLists.txt @@ -12,10 +12,10 @@ DXILPrepare.cpp LINK_COMPONENTS - Bitwriter Core Support DirectXInfo + DXILBitWriter ADD_TO_COMPONENT DirectX @@ -23,3 +23,4 @@ add_subdirectory(MCTargetDesc) add_subdirectory(TargetInfo) +add_subdirectory(DXILWriter) diff --git a/llvm/lib/Target/DirectX/DXILWriter/CMakeLists.txt b/llvm/lib/Target/DirectX/DXILWriter/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/DirectX/DXILWriter/CMakeLists.txt @@ -0,0 +1,15 @@ +add_llvm_component_library(LLVMDXILBitWriter + DXILBitcodeWriter.cpp + DXILValueEnumerator.cpp + DXILWriterPass.cpp + + DEPENDS + intrinsics_gen + + LINK_COMPONENTS + Bitwriter + Core + MC + Object + Support + ) diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h @@ -0,0 +1,82 @@ +//===- Bitcode/Writer/DXILBitcodeWriter.cpp - DXIL Bitcode Writer ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Bitcode writer implementation. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/StringRef.h" +#include "llvm/IR/ModuleSummaryIndex.h" +#include "llvm/MC/StringTableBuilder.h" +#include "llvm/Support/Allocator.h" +#include "llvm/Support/MemoryBufferRef.h" +#include +#include +#include +#include + +namespace llvm { + +class BitstreamWriter; +class Module; +class raw_ostream; + +namespace dxil { + +class BitcodeWriter { + SmallVectorImpl &Buffer; + std::unique_ptr Stream; + + StringTableBuilder StrtabBuilder{StringTableBuilder::RAW}; + + // Owns any strings created by the irsymtab writer until we create the + // string table. + BumpPtrAllocator Alloc; + + bool WroteStrtab = false, WroteSymtab = false; + + void writeBlob(unsigned Block, unsigned Record, StringRef Blob); + + std::vector Mods; + +public: + /// Create a BitcodeWriter that writes to Buffer. + BitcodeWriter(SmallVectorImpl &Buffer, raw_fd_stream *FS = nullptr); + + ~BitcodeWriter(); + + /// Attempt to write a symbol table to the bitcode file. This must be called + /// at most once after all modules have been written. + /// + /// A reader does not require a symbol table to interpret a bitcode file; + /// the symbol table is needed only to improve link-time performance. So + /// this function may decide not to write a symbol table. It may so decide + /// if, for example, the target is unregistered or the IR is malformed. + void writeSymtab(); + + /// Write the bitcode file's string table. This must be called exactly once + /// after all modules and the optional symbol table have been written. + void writeStrtab(); + + /// Copy the string table for another module into this bitcode file. This + /// should be called after copying the module itself into the bitcode file. + void copyStrtab(StringRef Strtab); + + /// Write the specified module to the buffer specified at construction time. + void writeModule(const Module &M); +}; + +/// Write the specified module to the specified raw output stream. 
+/// +/// For streams where it matters, the given stream should be in "binary" +/// mode. +void WriteDXILToFile(const Module &M, raw_ostream &Out); + +} // namespace dxil + +} // namespace llvm diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp @@ -0,0 +1,2963 @@ +//===- Bitcode/Writer/DXILBitcodeWriter.cpp - DXIL Bitcode Writer ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Bitcode writer implementation. +// +//===----------------------------------------------------------------------===// + +#include "DXILBitcodeWriter.h" +#include "DXILValueEnumerator.h" +#include "llvm/ADT/Triple.h" +#include "llvm/Bitcode/BitcodeCommon.h" +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/Bitcode/LLVMBitCodes.h" +#include "llvm/Bitstream/BitCodes.h" +#include "llvm/Bitstream/BitstreamWriter.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Comdat.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalIFunc.h" +#include "llvm/IR/GlobalObject.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/ModuleSummaryIndex.h" +#include "llvm/IR/Operator.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/UseListOrder.h" +#include "llvm/IR/Value.h" +#include "llvm/IR/ValueSymbolTable.h" +#include "llvm/Object/IRSymtab.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SHA1.h" + +namespace llvm { +namespace dxil { + +// Generates an enum to use as an index in the Abbrev array of Metadata record. +enum MetadataAbbrev : unsigned { +#define HANDLE_MDNODE_LEAF(CLASS) CLASS##AbbrevID, +#include "llvm/IR/Metadata.def" + LastPlusOne +}; + +class DXILBitcodeWriter { + + /// These are manifest constants used by the bitcode writer. They do not need + /// to be kept in sync with the reader, but need to be consistent within this + /// file. + enum { + // VALUE_SYMTAB_BLOCK abbrev id's. + VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV, + VST_ENTRY_7_ABBREV, + VST_ENTRY_6_ABBREV, + VST_BBENTRY_6_ABBREV, + + // CONSTANTS_BLOCK abbrev id's. + CONSTANTS_SETTYPE_ABBREV = bitc::FIRST_APPLICATION_ABBREV, + CONSTANTS_INTEGER_ABBREV, + CONSTANTS_CE_CAST_Abbrev, + CONSTANTS_NULL_Abbrev, + + // FUNCTION_BLOCK abbrev id's. + FUNCTION_INST_LOAD_ABBREV = bitc::FIRST_APPLICATION_ABBREV, + FUNCTION_INST_BINOP_ABBREV, + FUNCTION_INST_BINOP_FLAGS_ABBREV, + FUNCTION_INST_CAST_ABBREV, + FUNCTION_INST_RET_VOID_ABBREV, + FUNCTION_INST_RET_VAL_ABBREV, + FUNCTION_INST_UNREACHABLE_ABBREV, + FUNCTION_INST_GEP_ABBREV, + }; + + /// The stream created and owned by the client. + BitstreamWriter &Stream; + + StringTableBuilder &StrtabBuilder; + + /// The Module to write to bitcode. 
+ const Module &M; + + /// Enumerates ids for all values in the module. + ValueEnumerator VE; + + /// Map that holds the correspondence between GUIDs in the summary index, + /// that came from indirect call profiles, and a value id generated by this + /// class to use in the VST and summary block records. + std::map GUIDToValueIdMap; + + /// Tracks the last value id recorded in the GUIDToValueMap. + unsigned GlobalValueId; + + /// Saves the offset of the VSTOffset record that must eventually be + /// backpatched with the offset of the actual VST. + uint64_t VSTOffsetPlaceholder = 0; + + /// Pointer to the buffer allocated by caller for bitcode writing. + const SmallVectorImpl &Buffer; + + /// The start bit of the identification block. + uint64_t BitcodeStartBit; + +public: + /// Constructs a ModuleBitcodeWriter object for the given Module, + /// writing to the provided \p Buffer. + DXILBitcodeWriter(const Module &M, SmallVectorImpl &Buffer, + StringTableBuilder &StrtabBuilder, BitstreamWriter &Stream) + : Stream(Stream), StrtabBuilder(StrtabBuilder), M(M), + VE(M, true), Buffer(Buffer), + BitcodeStartBit(Stream.GetCurrentBitNo()) { + GlobalValueId = VE.getValues().size(); + } + + /// Emit the current module to the bitstream. + void write(); + + static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind); + static void writeStringRecord(BitstreamWriter &Stream, unsigned Code, + StringRef Str, unsigned AbbrevToUse); + static void writeIdentificationBlock(BitstreamWriter &Stream); + static void emitSignedInt64(SmallVectorImpl &Vals, uint64_t V); + static void emitWideAPInt(SmallVectorImpl &Vals, const APInt &A); + + static unsigned getEncodedComdatSelectionKind(const Comdat &C); + static unsigned getEncodedLinkage(const GlobalValue::LinkageTypes Linkage); + static unsigned getEncodedLinkage(const GlobalValue &GV); + static unsigned getEncodedVisibility(const GlobalValue &GV); + static unsigned getEncodedThreadLocalMode(const GlobalValue &GV); + static unsigned getEncodedDLLStorageClass(const GlobalValue &GV); + static unsigned getEncodedCastOpcode(unsigned Opcode); + static unsigned getEncodedUnaryOpcode(unsigned Opcode); + static unsigned getEncodedBinaryOpcode(unsigned Opcode); + static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op); + static unsigned getEncodedOrdering(AtomicOrdering Ordering); + static uint64_t getOptimizationFlags(const Value *V); + +private: + void writeModuleVersion(); + void writePerModuleGlobalValueSummary(); + + void writePerModuleFunctionSummaryRecord(SmallVector &NameVals, + GlobalValueSummary *Summary, + unsigned ValueID, + unsigned FSCallsAbbrev, + unsigned FSCallsProfileAbbrev, + const Function &F); + void writeModuleLevelReferences(const GlobalVariable &V, + SmallVector &NameVals, + unsigned FSModRefsAbbrev, + unsigned FSModVTableRefsAbbrev); + + void assignValueId(GlobalValue::GUID ValGUID) { + GUIDToValueIdMap[ValGUID] = ++GlobalValueId; + } + + unsigned getValueId(GlobalValue::GUID ValGUID) { + const auto &VMI = GUIDToValueIdMap.find(ValGUID); + // Expect that any GUID value had a value Id assigned by an + // earlier call to assignValueId. + assert(VMI != GUIDToValueIdMap.end() && + "GUID does not have assigned value Id"); + return VMI->second; + } + + // Helper to get the valueId for the type of value recorded in VI. 
+ unsigned getValueId(ValueInfo VI) { + if (!VI.haveGVs() || !VI.getValue()) + return getValueId(VI.getGUID()); + return VE.getValueID(VI.getValue()); + } + + std::map &valueIds() { return GUIDToValueIdMap; } + + uint64_t bitcodeStartBit() { return BitcodeStartBit; } + + size_t addToStrtab(StringRef Str); + + unsigned createDILocationAbbrev(); + unsigned createGenericDINodeAbbrev(); + + void writeAttributeGroupTable(); + void writeAttributeTable(); + void writeTypeTable(); + void writeComdats(); + void writeValueSymbolTableForwardDecl(); + void writeModuleInfo(); + void writeValueAsMetadata(const ValueAsMetadata *MD, + SmallVectorImpl &Record); + void writeMDTuple(const MDTuple *N, SmallVectorImpl &Record, + unsigned Abbrev); + void writeDILocation(const DILocation *N, SmallVectorImpl &Record, + unsigned &Abbrev); + void writeGenericDINode(const GenericDINode *N, + SmallVectorImpl &Record, unsigned &Abbrev) { + llvm_unreachable("DXIL cannot contain GenericDI Nodes"); + } + void writeDISubrange(const DISubrange *N, SmallVectorImpl &Record, + unsigned Abbrev); + void writeDIGenericSubrange(const DIGenericSubrange *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain DIGenericSubrange Nodes"); + } + void writeDIEnumerator(const DIEnumerator *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDIBasicType(const DIBasicType *N, SmallVectorImpl &Record, + unsigned Abbrev); + void writeDIStringType(const DIStringType *N, + SmallVectorImpl &Record, unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain DIStringType Nodes"); + } + void writeDIDerivedType(const DIDerivedType *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDICompositeType(const DICompositeType *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDISubroutineType(const DISubroutineType *N, + SmallVectorImpl &Record, + unsigned Abbrev); + void writeDIFile(const DIFile *N, SmallVectorImpl &Record, + unsigned Abbrev); + void writeDICompileUnit(const DICompileUnit *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDISubprogram(const DISubprogram *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDILexicalBlock(const DILexicalBlock *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDILexicalBlockFile(const DILexicalBlockFile *N, + SmallVectorImpl &Record, + unsigned Abbrev); + void writeDICommonBlock(const DICommonBlock *N, + SmallVectorImpl &Record, unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain DICommonBlock Nodes"); + } + void writeDINamespace(const DINamespace *N, SmallVectorImpl &Record, + unsigned Abbrev); + void writeDIMacro(const DIMacro *N, SmallVectorImpl &Record, + unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain DIMacro Nodes"); + } + void writeDIMacroFile(const DIMacroFile *N, SmallVectorImpl &Record, + unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain DIMacroFile Nodes"); + } + void writeDIArgList(const DIArgList *N, SmallVectorImpl &Record, + unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain DIArgList Nodes"); + } + void writeDIModule(const DIModule *N, SmallVectorImpl &Record, + unsigned Abbrev); + void writeDITemplateTypeParameter(const DITemplateTypeParameter *N, + SmallVectorImpl &Record, + unsigned Abbrev); + void writeDITemplateValueParameter(const DITemplateValueParameter *N, + SmallVectorImpl &Record, + unsigned Abbrev); + void writeDIGlobalVariable(const DIGlobalVariable *N, + SmallVectorImpl &Record, + unsigned Abbrev); + void writeDILocalVariable(const DILocalVariable *N, + 
SmallVectorImpl &Record, unsigned Abbrev); + void writeDILabel(const DILabel *N, SmallVectorImpl &Record, + unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain DILabel Nodes"); + } + void writeDIExpression(const DIExpression *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDIGlobalVariableExpression(const DIGlobalVariableExpression *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + llvm_unreachable("DXIL cannot contain GlobalVariableExpression Nodes"); + } + void writeDIObjCProperty(const DIObjCProperty *N, + SmallVectorImpl &Record, unsigned Abbrev); + void writeDIImportedEntity(const DIImportedEntity *N, + SmallVectorImpl &Record, + unsigned Abbrev); + unsigned createNamedMetadataAbbrev(); + void writeNamedMetadata(SmallVectorImpl &Record); + unsigned createMetadataStringsAbbrev(); + void writeMetadataStrings(ArrayRef Strings, + SmallVectorImpl &Record); + void writeMetadataRecords(ArrayRef MDs, + SmallVectorImpl &Record, + std::vector *MDAbbrevs = nullptr, + std::vector *IndexPos = nullptr); + void writeModuleMetadata(); + void writeFunctionMetadata(const Function &F); + void writeFunctionMetadataAttachment(const Function &F); + void pushGlobalMetadataAttachment(SmallVectorImpl &Record, + const GlobalObject &GO); + void writeModuleMetadataKinds(); + void writeOperandBundleTags(); + void writeSyncScopeNames(); + void writeConstants(unsigned FirstVal, unsigned LastVal, bool isGlobal); + void writeModuleConstants(); + bool pushValueAndType(const Value *V, unsigned InstID, + SmallVectorImpl &Vals); + void writeOperandBundles(const CallBase &CB, unsigned InstID); + void pushValue(const Value *V, unsigned InstID, + SmallVectorImpl &Vals); + void pushValueSigned(const Value *V, unsigned InstID, + SmallVectorImpl &Vals); + void writeInstruction(const Instruction &I, unsigned InstID, + SmallVectorImpl &Vals); + void writeFunctionLevelValueSymbolTable(const ValueSymbolTable &VST); + void writeGlobalValueSymbolTable( + DenseMap &FunctionToBitcodeIndex); + void writeUseList(UseListOrder &&Order); + void writeUseListBlock(const Function *F); + void writeFunction(const Function &F); + void writeBlockInfo(); + + unsigned getEncodedSyncScopeID(SyncScope::ID SSID) { return unsigned(SSID); } + + unsigned getEncodedAlign(MaybeAlign Alignment) { return encode(Alignment); } +}; + +} // namespace dxil +} // namespace llvm + +using namespace llvm; +using namespace llvm::dxil; + +//////////////////////////////////////////////////////////////////////////////// +/// Begin dxil::BitcodeWriter Implementation +//////////////////////////////////////////////////////////////////////////////// + +dxil::BitcodeWriter::BitcodeWriter(SmallVectorImpl &Buffer, raw_fd_stream *FS) + : Buffer(Buffer), Stream(new BitstreamWriter(Buffer, FS, 512)) { + // Emit the file header. + Stream->Emit((unsigned)'B', 8); + Stream->Emit((unsigned)'C', 8); + Stream->Emit(0x0, 4); + Stream->Emit(0xC, 4); + Stream->Emit(0xE, 4); + Stream->Emit(0xD, 4); +} + +dxil::BitcodeWriter::~BitcodeWriter() { assert(WroteStrtab); } + +/// Write the specified module to the specified output stream. +void dxil::WriteDXILToFile(const Module &M, raw_ostream &Out) { + SmallVector Buffer; + Buffer.reserve(256 * 1024); + + // If this is darwin or another generic macho target, reserve space for the + // header. 
+ Triple TT(M.getTargetTriple()); + if (TT.isOSDarwin() || TT.isOSBinFormatMachO()) + Buffer.insert(Buffer.begin(), BWH_HeaderSize, 0); + + BitcodeWriter Writer(Buffer, dyn_cast(&Out)); + Writer.writeModule(M); + Writer.writeSymtab(); + Writer.writeStrtab(); + + // Write the generated bitstream to "Out". + if (!Buffer.empty()) + Out.write((char *)&Buffer.front(), Buffer.size()); +} + +void BitcodeWriter::writeBlob(unsigned Block, unsigned Record, StringRef Blob) { + Stream->EnterSubblock(Block, 3); + + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(Record)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); + auto AbbrevNo = Stream->EmitAbbrev(std::move(Abbv)); + + Stream->EmitRecordWithBlob(AbbrevNo, ArrayRef{Record}, Blob); + + Stream->ExitBlock(); +} + +void BitcodeWriter::writeSymtab() { + assert(!WroteStrtab && !WroteSymtab); + + // If any module has module-level inline asm, we will require a registered asm + // parser for the target so that we can create an accurate symbol table for + // the module. + for (Module *M : Mods) { + if (M->getModuleInlineAsm().empty()) + continue; + } + + WroteSymtab = true; + SmallVector Symtab; + // The irsymtab::build function may be unable to create a symbol table if the + // module is malformed (e.g. it contains an invalid alias). Writing a symbol + // table is not required for correctness, but we still want to be able to + // write malformed modules to bitcode files, so swallow the error. + if (Error E = irsymtab::build(Mods, Symtab, StrtabBuilder, Alloc)) { + consumeError(std::move(E)); + return; + } + + writeBlob(bitc::SYMTAB_BLOCK_ID, bitc::SYMTAB_BLOB, + {Symtab.data(), Symtab.size()}); +} + +void BitcodeWriter::writeStrtab() { + assert(!WroteStrtab); + + std::vector Strtab; + StrtabBuilder.finalizeInOrder(); + Strtab.resize(StrtabBuilder.getSize()); + StrtabBuilder.write((uint8_t *)Strtab.data()); + + writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, + {Strtab.data(), Strtab.size()}); + + WroteStrtab = true; +} + +void BitcodeWriter::copyStrtab(StringRef Strtab) { + writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, Strtab); + WroteStrtab = true; +} + +void BitcodeWriter::writeModule(const Module &M) { + assert(!WroteStrtab); + + // The Mods vector is used by irsymtab::build, which requires non-const + // Modules in case it needs to materialize metadata. But the bitcode writer + // requires that the module is materialized, so we can cast to non-const here, + // after checking that it is in fact materialized. 
+ assert(M.isMaterialized()); + Mods.push_back(const_cast(&M)); + + DXILBitcodeWriter ModuleWriter(M, Buffer, StrtabBuilder, *Stream); + ModuleWriter.write(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// Begin dxil::BitcodeWriterBase Implementation +//////////////////////////////////////////////////////////////////////////////// + +unsigned DXILBitcodeWriter::getEncodedCastOpcode(unsigned Opcode) { + switch (Opcode) { + default: + llvm_unreachable("Unknown cast instruction!"); + case Instruction::Trunc: + return bitc::CAST_TRUNC; + case Instruction::ZExt: + return bitc::CAST_ZEXT; + case Instruction::SExt: + return bitc::CAST_SEXT; + case Instruction::FPToUI: + return bitc::CAST_FPTOUI; + case Instruction::FPToSI: + return bitc::CAST_FPTOSI; + case Instruction::UIToFP: + return bitc::CAST_UITOFP; + case Instruction::SIToFP: + return bitc::CAST_SITOFP; + case Instruction::FPTrunc: + return bitc::CAST_FPTRUNC; + case Instruction::FPExt: + return bitc::CAST_FPEXT; + case Instruction::PtrToInt: + return bitc::CAST_PTRTOINT; + case Instruction::IntToPtr: + return bitc::CAST_INTTOPTR; + case Instruction::BitCast: + return bitc::CAST_BITCAST; + case Instruction::AddrSpaceCast: + return bitc::CAST_ADDRSPACECAST; + } +} + +unsigned DXILBitcodeWriter::getEncodedUnaryOpcode(unsigned Opcode) { + switch (Opcode) { + default: + llvm_unreachable("Unknown binary instruction!"); + case Instruction::FNeg: + return bitc::UNOP_FNEG; + } +} + +unsigned DXILBitcodeWriter::getEncodedBinaryOpcode(unsigned Opcode) { + switch (Opcode) { + default: + llvm_unreachable("Unknown binary instruction!"); + case Instruction::Add: + case Instruction::FAdd: + return bitc::BINOP_ADD; + case Instruction::Sub: + case Instruction::FSub: + return bitc::BINOP_SUB; + case Instruction::Mul: + case Instruction::FMul: + return bitc::BINOP_MUL; + case Instruction::UDiv: + return bitc::BINOP_UDIV; + case Instruction::FDiv: + case Instruction::SDiv: + return bitc::BINOP_SDIV; + case Instruction::URem: + return bitc::BINOP_UREM; + case Instruction::FRem: + case Instruction::SRem: + return bitc::BINOP_SREM; + case Instruction::Shl: + return bitc::BINOP_SHL; + case Instruction::LShr: + return bitc::BINOP_LSHR; + case Instruction::AShr: + return bitc::BINOP_ASHR; + case Instruction::And: + return bitc::BINOP_AND; + case Instruction::Or: + return bitc::BINOP_OR; + case Instruction::Xor: + return bitc::BINOP_XOR; + } +} + +unsigned DXILBitcodeWriter::getEncodedRMWOperation(AtomicRMWInst::BinOp Op) { + switch (Op) { + default: + llvm_unreachable("Unknown RMW operation!"); + case AtomicRMWInst::Xchg: + return bitc::RMW_XCHG; + case AtomicRMWInst::Add: + return bitc::RMW_ADD; + case AtomicRMWInst::Sub: + return bitc::RMW_SUB; + case AtomicRMWInst::And: + return bitc::RMW_AND; + case AtomicRMWInst::Nand: + return bitc::RMW_NAND; + case AtomicRMWInst::Or: + return bitc::RMW_OR; + case AtomicRMWInst::Xor: + return bitc::RMW_XOR; + case AtomicRMWInst::Max: + return bitc::RMW_MAX; + case AtomicRMWInst::Min: + return bitc::RMW_MIN; + case AtomicRMWInst::UMax: + return bitc::RMW_UMAX; + case AtomicRMWInst::UMin: + return bitc::RMW_UMIN; + case AtomicRMWInst::FAdd: + return bitc::RMW_FADD; + case AtomicRMWInst::FSub: + return bitc::RMW_FSUB; + } +} + +unsigned DXILBitcodeWriter::getEncodedOrdering(AtomicOrdering Ordering) { + switch (Ordering) { + case AtomicOrdering::NotAtomic: + return bitc::ORDERING_NOTATOMIC; + case AtomicOrdering::Unordered: + return bitc::ORDERING_UNORDERED; + case 
AtomicOrdering::Monotonic: + return bitc::ORDERING_MONOTONIC; + case AtomicOrdering::Acquire: + return bitc::ORDERING_ACQUIRE; + case AtomicOrdering::Release: + return bitc::ORDERING_RELEASE; + case AtomicOrdering::AcquireRelease: + return bitc::ORDERING_ACQREL; + case AtomicOrdering::SequentiallyConsistent: + return bitc::ORDERING_SEQCST; + } + llvm_unreachable("Invalid ordering"); +} + +void DXILBitcodeWriter::writeStringRecord(BitstreamWriter &Stream, + unsigned Code, StringRef Str, + unsigned AbbrevToUse) { + SmallVector Vals; + + // Code: [strchar x N] + for (char C : Str) { + if (AbbrevToUse && !BitCodeAbbrevOp::isChar6(C)) + AbbrevToUse = 0; + Vals.push_back(C); + } + + // Emit the finished record. + Stream.EmitRecord(Code, Vals, AbbrevToUse); +} + +uint64_t DXILBitcodeWriter::getAttrKindEncoding(Attribute::AttrKind Kind) { + switch (Kind) { + case Attribute::Alignment: + return bitc::ATTR_KIND_ALIGNMENT; + case Attribute::AllocAlign: + return bitc::ATTR_KIND_ALLOC_ALIGN; + case Attribute::AllocSize: + return bitc::ATTR_KIND_ALLOC_SIZE; + case Attribute::AlwaysInline: + return bitc::ATTR_KIND_ALWAYS_INLINE; + case Attribute::ArgMemOnly: + return bitc::ATTR_KIND_ARGMEMONLY; + case Attribute::Builtin: + return bitc::ATTR_KIND_BUILTIN; + case Attribute::ByVal: + return bitc::ATTR_KIND_BY_VAL; + case Attribute::Convergent: + return bitc::ATTR_KIND_CONVERGENT; + case Attribute::InAlloca: + return bitc::ATTR_KIND_IN_ALLOCA; + case Attribute::Cold: + return bitc::ATTR_KIND_COLD; + case Attribute::DisableSanitizerInstrumentation: + return bitc::ATTR_KIND_DISABLE_SANITIZER_INSTRUMENTATION; + case Attribute::Hot: + return bitc::ATTR_KIND_HOT; + case Attribute::ElementType: + return bitc::ATTR_KIND_ELEMENTTYPE; + case Attribute::InaccessibleMemOnly: + return bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY; + case Attribute::InaccessibleMemOrArgMemOnly: + return bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY; + case Attribute::InlineHint: + return bitc::ATTR_KIND_INLINE_HINT; + case Attribute::InReg: + return bitc::ATTR_KIND_IN_REG; + case Attribute::JumpTable: + return bitc::ATTR_KIND_JUMP_TABLE; + case Attribute::MinSize: + return bitc::ATTR_KIND_MIN_SIZE; + case Attribute::Naked: + return bitc::ATTR_KIND_NAKED; + case Attribute::Nest: + return bitc::ATTR_KIND_NEST; + case Attribute::NoAlias: + return bitc::ATTR_KIND_NO_ALIAS; + case Attribute::NoBuiltin: + return bitc::ATTR_KIND_NO_BUILTIN; + case Attribute::NoCallback: + return bitc::ATTR_KIND_NO_CALLBACK; + case Attribute::NoCapture: + return bitc::ATTR_KIND_NO_CAPTURE; + case Attribute::NoDuplicate: + return bitc::ATTR_KIND_NO_DUPLICATE; + case Attribute::NoFree: + return bitc::ATTR_KIND_NOFREE; + case Attribute::NoImplicitFloat: + return bitc::ATTR_KIND_NO_IMPLICIT_FLOAT; + case Attribute::NoInline: + return bitc::ATTR_KIND_NO_INLINE; + case Attribute::NoRecurse: + return bitc::ATTR_KIND_NO_RECURSE; + case Attribute::NoMerge: + return bitc::ATTR_KIND_NO_MERGE; + case Attribute::NonLazyBind: + return bitc::ATTR_KIND_NON_LAZY_BIND; + case Attribute::NonNull: + return bitc::ATTR_KIND_NON_NULL; + case Attribute::Dereferenceable: + return bitc::ATTR_KIND_DEREFERENCEABLE; + case Attribute::DereferenceableOrNull: + return bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL; + case Attribute::NoRedZone: + return bitc::ATTR_KIND_NO_RED_ZONE; + case Attribute::NoReturn: + return bitc::ATTR_KIND_NO_RETURN; + case Attribute::NoSync: + return bitc::ATTR_KIND_NOSYNC; + case Attribute::NoCfCheck: + return bitc::ATTR_KIND_NOCF_CHECK; + case Attribute::NoProfile: + return 
bitc::ATTR_KIND_NO_PROFILE; + case Attribute::NoUnwind: + return bitc::ATTR_KIND_NO_UNWIND; + case Attribute::NoSanitizeBounds: + return bitc::ATTR_KIND_NO_SANITIZE_BOUNDS; + case Attribute::NoSanitizeCoverage: + return bitc::ATTR_KIND_NO_SANITIZE_COVERAGE; + case Attribute::NullPointerIsValid: + return bitc::ATTR_KIND_NULL_POINTER_IS_VALID; + case Attribute::OptForFuzzing: + return bitc::ATTR_KIND_OPT_FOR_FUZZING; + case Attribute::OptimizeForSize: + return bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE; + case Attribute::OptimizeNone: + return bitc::ATTR_KIND_OPTIMIZE_NONE; + case Attribute::ReadNone: + return bitc::ATTR_KIND_READ_NONE; + case Attribute::ReadOnly: + return bitc::ATTR_KIND_READ_ONLY; + case Attribute::Returned: + return bitc::ATTR_KIND_RETURNED; + case Attribute::ReturnsTwice: + return bitc::ATTR_KIND_RETURNS_TWICE; + case Attribute::SExt: + return bitc::ATTR_KIND_S_EXT; + case Attribute::Speculatable: + return bitc::ATTR_KIND_SPECULATABLE; + case Attribute::StackAlignment: + return bitc::ATTR_KIND_STACK_ALIGNMENT; + case Attribute::StackProtect: + return bitc::ATTR_KIND_STACK_PROTECT; + case Attribute::StackProtectReq: + return bitc::ATTR_KIND_STACK_PROTECT_REQ; + case Attribute::StackProtectStrong: + return bitc::ATTR_KIND_STACK_PROTECT_STRONG; + case Attribute::SafeStack: + return bitc::ATTR_KIND_SAFESTACK; + case Attribute::ShadowCallStack: + return bitc::ATTR_KIND_SHADOWCALLSTACK; + case Attribute::StrictFP: + return bitc::ATTR_KIND_STRICT_FP; + case Attribute::StructRet: + return bitc::ATTR_KIND_STRUCT_RET; + case Attribute::SanitizeAddress: + return bitc::ATTR_KIND_SANITIZE_ADDRESS; + case Attribute::SanitizeHWAddress: + return bitc::ATTR_KIND_SANITIZE_HWADDRESS; + case Attribute::SanitizeThread: + return bitc::ATTR_KIND_SANITIZE_THREAD; + case Attribute::SanitizeMemory: + return bitc::ATTR_KIND_SANITIZE_MEMORY; + case Attribute::SpeculativeLoadHardening: + return bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING; + case Attribute::SwiftError: + return bitc::ATTR_KIND_SWIFT_ERROR; + case Attribute::SwiftSelf: + return bitc::ATTR_KIND_SWIFT_SELF; + case Attribute::SwiftAsync: + return bitc::ATTR_KIND_SWIFT_ASYNC; + case Attribute::UWTable: + return bitc::ATTR_KIND_UW_TABLE; + case Attribute::VScaleRange: + return bitc::ATTR_KIND_VSCALE_RANGE; + case Attribute::WillReturn: + return bitc::ATTR_KIND_WILLRETURN; + case Attribute::WriteOnly: + return bitc::ATTR_KIND_WRITEONLY; + case Attribute::ZExt: + return bitc::ATTR_KIND_Z_EXT; + case Attribute::ImmArg: + return bitc::ATTR_KIND_IMMARG; + case Attribute::SanitizeMemTag: + return bitc::ATTR_KIND_SANITIZE_MEMTAG; + case Attribute::Preallocated: + return bitc::ATTR_KIND_PREALLOCATED; + case Attribute::NoUndef: + return bitc::ATTR_KIND_NOUNDEF; + case Attribute::ByRef: + return bitc::ATTR_KIND_BYREF; + case Attribute::MustProgress: + return bitc::ATTR_KIND_MUSTPROGRESS; + case Attribute::EndAttrKinds: + llvm_unreachable("Can not encode end-attribute kinds marker."); + case Attribute::None: + llvm_unreachable("Can not encode none-attribute."); + case Attribute::EmptyKey: + case Attribute::TombstoneKey: + llvm_unreachable("Trying to encode EmptyKey/TombstoneKey"); + } + + llvm_unreachable("Trying to encode unknown attribute"); +} + +void DXILBitcodeWriter::emitSignedInt64(SmallVectorImpl &Vals, + uint64_t V) { + if ((int64_t)V >= 0) + Vals.push_back(V << 1); + else + Vals.push_back((-V << 1) | 1); +} + +void DXILBitcodeWriter::emitWideAPInt(SmallVectorImpl &Vals, + const APInt &A) { + // We have an arbitrary precision integer value to write 
whose + // bit width is > 64. However, in canonical unsigned integer + // format it is likely that the high bits are going to be zero. + // So, we only write the number of active words. + unsigned NumWords = A.getActiveWords(); + const uint64_t *RawData = A.getRawData(); + for (unsigned i = 0; i < NumWords; i++) + emitSignedInt64(Vals, RawData[i]); +} + +uint64_t DXILBitcodeWriter::getOptimizationFlags(const Value *V) { + uint64_t Flags = 0; + + if (const auto *OBO = dyn_cast(V)) { + if (OBO->hasNoSignedWrap()) + Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP; + if (OBO->hasNoUnsignedWrap()) + Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP; + } else if (const auto *PEO = dyn_cast(V)) { + if (PEO->isExact()) + Flags |= 1 << bitc::PEO_EXACT; + } else if (const auto *FPMO = dyn_cast(V)) { + if (FPMO->hasAllowReassoc()) + Flags |= bitc::AllowReassoc; + if (FPMO->hasNoNaNs()) + Flags |= bitc::NoNaNs; + if (FPMO->hasNoInfs()) + Flags |= bitc::NoInfs; + if (FPMO->hasNoSignedZeros()) + Flags |= bitc::NoSignedZeros; + if (FPMO->hasAllowReciprocal()) + Flags |= bitc::AllowReciprocal; + if (FPMO->hasAllowContract()) + Flags |= bitc::AllowContract; + if (FPMO->hasApproxFunc()) + Flags |= bitc::ApproxFunc; + } + + return Flags; +} + +unsigned +DXILBitcodeWriter::getEncodedLinkage(const GlobalValue::LinkageTypes Linkage) { + switch (Linkage) { + case GlobalValue::ExternalLinkage: + return 0; + case GlobalValue::WeakAnyLinkage: + return 16; + case GlobalValue::AppendingLinkage: + return 2; + case GlobalValue::InternalLinkage: + return 3; + case GlobalValue::LinkOnceAnyLinkage: + return 18; + case GlobalValue::ExternalWeakLinkage: + return 7; + case GlobalValue::CommonLinkage: + return 8; + case GlobalValue::PrivateLinkage: + return 9; + case GlobalValue::WeakODRLinkage: + return 17; + case GlobalValue::LinkOnceODRLinkage: + return 19; + case GlobalValue::AvailableExternallyLinkage: + return 12; + } + llvm_unreachable("Invalid linkage"); +} + +unsigned DXILBitcodeWriter::getEncodedLinkage(const GlobalValue &GV) { + return getEncodedLinkage(GV.getLinkage()); +} + +unsigned DXILBitcodeWriter::getEncodedVisibility(const GlobalValue &GV) { + switch (GV.getVisibility()) { + case GlobalValue::DefaultVisibility: return 0; + case GlobalValue::HiddenVisibility: return 1; + case GlobalValue::ProtectedVisibility: return 2; + } + llvm_unreachable("Invalid visibility"); +} + +unsigned DXILBitcodeWriter::getEncodedDLLStorageClass(const GlobalValue &GV) { + switch (GV.getDLLStorageClass()) { + case GlobalValue::DefaultStorageClass: return 0; + case GlobalValue::DLLImportStorageClass: return 1; + case GlobalValue::DLLExportStorageClass: return 2; + } + llvm_unreachable("Invalid DLL storage class"); +} + +unsigned DXILBitcodeWriter::getEncodedThreadLocalMode(const GlobalValue &GV) { + switch (GV.getThreadLocalMode()) { + case GlobalVariable::NotThreadLocal: return 0; + case GlobalVariable::GeneralDynamicTLSModel: return 1; + case GlobalVariable::LocalDynamicTLSModel: return 2; + case GlobalVariable::InitialExecTLSModel: return 3; + case GlobalVariable::LocalExecTLSModel: return 4; + } + llvm_unreachable("Invalid TLS model"); +} + +unsigned DXILBitcodeWriter::getEncodedComdatSelectionKind(const Comdat &C) { + switch (C.getSelectionKind()) { + case Comdat::Any: + return bitc::COMDAT_SELECTION_KIND_ANY; + case Comdat::ExactMatch: + return bitc::COMDAT_SELECTION_KIND_EXACT_MATCH; + case Comdat::Largest: + return bitc::COMDAT_SELECTION_KIND_LARGEST; + case Comdat::NoDeduplicate: + return bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES; + case 
Comdat::SameSize: + return bitc::COMDAT_SELECTION_KIND_SAME_SIZE; + } + llvm_unreachable("Invalid selection kind"); +} + +//////////////////////////////////////////////////////////////////////////////// +/// Begin DXILBitcodeWriter Implementation +//////////////////////////////////////////////////////////////////////////////// + +void DXILBitcodeWriter::writeAttributeGroupTable() { + const std::vector &AttrGrps = + VE.getAttributeGroups(); + if (AttrGrps.empty()) + return; + + Stream.EnterSubblock(bitc::PARAMATTR_GROUP_BLOCK_ID, 3); + + SmallVector Record; + for (ValueEnumerator::IndexAndAttrSet Pair : AttrGrps) { + unsigned AttrListIndex = Pair.first; + AttributeSet AS = Pair.second; + Record.push_back(VE.getAttributeGroupID(Pair)); + Record.push_back(AttrListIndex); + + for (Attribute Attr : AS) { + if (Attr.isEnumAttribute()) { + uint64_t Val = getAttrKindEncoding(Attr.getKindAsEnum()); + assert(Val <= bitc::ATTR_KIND_ARGMEMONLY && + "DXIL does not support attributes above ATTR_KIND_ARGMEMONLY"); + Record.push_back(0); + Record.push_back(Val); + } else if (Attr.isIntAttribute()) { + uint64_t Val = getAttrKindEncoding(Attr.getKindAsEnum()); + assert(Val <= bitc::ATTR_KIND_ARGMEMONLY && + "DXIL does not support attributes above ATTR_KIND_ARGMEMONLY"); + Record.push_back(1); + Record.push_back(Val); + Record.push_back(Attr.getValueAsInt()); + } else { + StringRef Kind = Attr.getKindAsString(); + StringRef Val = Attr.getValueAsString(); + + Record.push_back(Val.empty() ? 3 : 4); + Record.append(Kind.begin(), Kind.end()); + Record.push_back(0); + if (!Val.empty()) { + Record.append(Val.begin(), Val.end()); + Record.push_back(0); + } + } + } + + Stream.EmitRecord(bitc::PARAMATTR_GRP_CODE_ENTRY, Record); + Record.clear(); + } + + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeAttributeTable() { + const std::vector &Attrs = VE.getAttributeLists(); + if (Attrs.empty()) + return; + + Stream.EnterSubblock(bitc::PARAMATTR_BLOCK_ID, 3); + + SmallVector Record; + for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { + AttributeList AL = Attrs[i]; + for (unsigned i : AL.indexes()) { + AttributeSet AS = AL.getAttributes(i); + if (AS.hasAttributes()) + Record.push_back(VE.getAttributeGroupID({i, AS})); + } + + Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record); + Record.clear(); + } + + Stream.ExitBlock(); +} + +/// WriteTypeTable - Write out the type table for a module. +void DXILBitcodeWriter::writeTypeTable() { + const ValueEnumerator::TypeList &TypeList = VE.getTypes(); + + Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */); + SmallVector TypeVals; + + uint64_t NumBits = VE.computeBitsRequiredForTypeIndicies(); + + // Abbrev for TYPE_CODE_POINTER. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_POINTER)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); + Abbv->Add(BitCodeAbbrevOp(0)); // Addrspace = 0 + unsigned PtrAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + // Abbrev for TYPE_CODE_FUNCTION. + Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_FUNCTION)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isvararg + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); + unsigned FunctionAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + // Abbrev for TYPE_CODE_STRUCT_ANON. 
+ Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_ANON)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); + unsigned StructAnonAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + // Abbrev for TYPE_CODE_STRUCT_NAME. + Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAME)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); + unsigned StructNameAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + // Abbrev for TYPE_CODE_STRUCT_NAMED. + Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAMED)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); + unsigned StructNamedAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + // Abbrev for TYPE_CODE_ARRAY. + Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // size + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits)); + unsigned ArrayAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + // Emit an entry count so the reader can reserve space. + TypeVals.push_back(TypeList.size()); + Stream.EmitRecord(bitc::TYPE_CODE_NUMENTRY, TypeVals); + TypeVals.clear(); + + // Loop over all of the types, emitting each in turn. + for (Type *T : TypeList) { + int AbbrevToUse = 0; + unsigned Code = 0; + + switch (T->getTypeID()) { + case Type::BFloatTyID: + case Type::X86_AMXTyID: + case Type::TokenTyID: + llvm_unreachable("These should never be used!!!"); + break; + case Type::VoidTyID: + Code = bitc::TYPE_CODE_VOID; + break; + case Type::HalfTyID: + Code = bitc::TYPE_CODE_HALF; + break; + case Type::FloatTyID: + Code = bitc::TYPE_CODE_FLOAT; + break; + case Type::DoubleTyID: + Code = bitc::TYPE_CODE_DOUBLE; + break; + case Type::X86_FP80TyID: + Code = bitc::TYPE_CODE_X86_FP80; + break; + case Type::FP128TyID: + Code = bitc::TYPE_CODE_FP128; + break; + case Type::PPC_FP128TyID: + Code = bitc::TYPE_CODE_PPC_FP128; + break; + case Type::LabelTyID: + Code = bitc::TYPE_CODE_LABEL; + break; + case Type::MetadataTyID: + Code = bitc::TYPE_CODE_METADATA; + break; + case Type::X86_MMXTyID: + Code = bitc::TYPE_CODE_X86_MMX; + break; + case Type::IntegerTyID: + // INTEGER: [width] + Code = bitc::TYPE_CODE_INTEGER; + TypeVals.push_back(cast(T)->getBitWidth()); + break; + case Type::PointerTyID: { + PointerType *PTy = cast(T); + // POINTER: [pointee type, address space] + Code = bitc::TYPE_CODE_POINTER; + TypeVals.push_back(VE.getTypeID(PTy->getNonOpaquePointerElementType())); + unsigned AddressSpace = PTy->getAddressSpace(); + TypeVals.push_back(AddressSpace); + if (AddressSpace == 0) + AbbrevToUse = PtrAbbrev; + break; + } + case Type::FunctionTyID: { + FunctionType *FT = cast(T); + // FUNCTION: [isvararg, retty, paramty x N] + Code = bitc::TYPE_CODE_FUNCTION; + TypeVals.push_back(FT->isVarArg()); + TypeVals.push_back(VE.getTypeID(FT->getReturnType())); + for (Type *PTy : FT->params()) + TypeVals.push_back(VE.getTypeID(PTy)); + AbbrevToUse = FunctionAbbrev; + break; + } + case Type::StructTyID: { + StructType *ST = cast(T); + // STRUCT: [ispacked, eltty x N] + TypeVals.push_back(ST->isPacked()); + // Output all of the element types. 
+ for (Type *ElTy : ST->elements()) + TypeVals.push_back(VE.getTypeID(ElTy)); + + if (ST->isLiteral()) { + Code = bitc::TYPE_CODE_STRUCT_ANON; + AbbrevToUse = StructAnonAbbrev; + } else { + if (ST->isOpaque()) { + Code = bitc::TYPE_CODE_OPAQUE; + } else { + Code = bitc::TYPE_CODE_STRUCT_NAMED; + AbbrevToUse = StructNamedAbbrev; + } + + // Emit the name if it is present. + if (!ST->getName().empty()) + writeStringRecord(Stream, bitc::TYPE_CODE_STRUCT_NAME, ST->getName(), + StructNameAbbrev); + } + break; + } + case Type::ArrayTyID: { + ArrayType *AT = cast(T); + // ARRAY: [numelts, eltty] + Code = bitc::TYPE_CODE_ARRAY; + TypeVals.push_back(AT->getNumElements()); + TypeVals.push_back(VE.getTypeID(AT->getElementType())); + AbbrevToUse = ArrayAbbrev; + break; + } + case Type::FixedVectorTyID: + case Type::ScalableVectorTyID: { + VectorType *VT = cast(T); + // VECTOR [numelts, eltty] + Code = bitc::TYPE_CODE_VECTOR; + TypeVals.push_back(VT->getElementCount().getKnownMinValue()); + TypeVals.push_back(VE.getTypeID(VT->getElementType())); + break; + } + } + + // Emit the finished record. + Stream.EmitRecord(Code, TypeVals, AbbrevToUse); + TypeVals.clear(); + } + + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeComdats() { + SmallVector Vals; + for (const Comdat *C : VE.getComdats()) { + // COMDAT: [selection_kind, name] + Vals.push_back(getEncodedComdatSelectionKind(*C)); + size_t Size = C->getName().size(); + assert(isUInt<16>(Size)); + Vals.push_back(Size); + for (char Chr : C->getName()) + Vals.push_back((unsigned char)Chr); + Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0); + Vals.clear(); + } +} + +void DXILBitcodeWriter::writeValueSymbolTableForwardDecl() {} + +/// Emit top-level description of module, including target triple, inline asm, +/// descriptors for global variables, and function prototype info. +/// Returns the bit offset to backpatch with the location of the real VST. +void DXILBitcodeWriter::writeModuleInfo() { + // Emit various pieces of data attached to a module. + if (!M.getTargetTriple().empty()) + writeStringRecord(Stream, bitc::MODULE_CODE_TRIPLE, M.getTargetTriple(), + 0 /*TODO*/); + const std::string &DL = M.getDataLayoutStr(); + if (!DL.empty()) + writeStringRecord(Stream, bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/); + if (!M.getModuleInlineAsm().empty()) + writeStringRecord(Stream, bitc::MODULE_CODE_ASM, M.getModuleInlineAsm(), + 0 /*TODO*/); + + // Emit information about sections and GC, computing how many there are. Also + // compute the maximum alignment value. + std::map SectionMap; + std::map GCMap; + MaybeAlign MaxAlignment; + unsigned MaxGlobalType = 0; + const auto UpdateMaxAlignment = [&MaxAlignment](const MaybeAlign A) { + if (A) + MaxAlignment = !MaxAlignment ? *A : std::max(*MaxAlignment, *A); + }; + for (const GlobalVariable &GV : M.globals()) { + UpdateMaxAlignment(GV.getAlign()); + MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV.getValueType())); + if (GV.hasSection()) { + // Give section names unique ID's. + unsigned &Entry = SectionMap[std::string(GV.getSection())]; + if (!Entry) { + writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, + GV.getSection(), 0 /*TODO*/); + Entry = SectionMap.size(); + } + } + } + for (const Function &F : M) { + UpdateMaxAlignment(F.getAlign()); + if (F.hasSection()) { + // Give section names unique ID's. 
+ unsigned &Entry = SectionMap[std::string(F.getSection())]; + if (!Entry) { + writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, F.getSection(), + 0 /*TODO*/); + Entry = SectionMap.size(); + } + } + if (F.hasGC()) { + // Same for GC names. + unsigned &Entry = GCMap[F.getGC()]; + if (!Entry) { + writeStringRecord(Stream, bitc::MODULE_CODE_GCNAME, F.getGC(), + 0 /*TODO*/); + Entry = GCMap.size(); + } + } + } + + // Emit abbrev for globals, now that we know # sections and max alignment. + unsigned SimpleGVarAbbrev = 0; + if (!M.global_empty()) { + // Add an abbrev for common globals with no visibility or thread localness. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, + Log2_32_Ceil(MaxGlobalType + 1))); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddrSpace << 2 + //| explicitType << 1 + //| constant + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Initializer. + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5)); // Linkage. + if (MaxAlignment == 0) // Alignment. + Abbv->Add(BitCodeAbbrevOp(0)); + else { + unsigned MaxEncAlignment = getEncodedAlign(MaxAlignment); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, + Log2_32_Ceil(MaxEncAlignment + 1))); + } + if (SectionMap.empty()) // Section. + Abbv->Add(BitCodeAbbrevOp(0)); + else + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, + Log2_32_Ceil(SectionMap.size() + 1))); + // Don't bother emitting vis + thread local. + SimpleGVarAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + } + + // Emit the global variable information. + SmallVector Vals; + for (const GlobalVariable &GV : M.globals()) { + unsigned AbbrevToUse = 0; + + // GLOBALVAR: [type, isconst, initid, + // linkage, alignment, section, visibility, threadlocal, + // unnamed_addr, externally_initialized, dllstorageclass, + // comdat] + Vals.push_back(VE.getTypeID(GV.getValueType())); + Vals.push_back( + GV.getType()->getAddressSpace() << 2 | 2 | + (GV.isConstant() ? 1 : 0)); // HLSL Change - bitwise | was used with + // unsigned int and bool + Vals.push_back( + GV.isDeclaration() ? 0 : (VE.getValueID(GV.getInitializer()) + 1)); + Vals.push_back(getEncodedLinkage(GV)); + Vals.push_back(getEncodedAlign(GV.getAlign())); + Vals.push_back(GV.hasSection() ? SectionMap[std::string(GV.getSection())] + : 0); + if (GV.isThreadLocal() || + GV.getVisibility() != GlobalValue::DefaultVisibility || + GV.getUnnamedAddr() != GlobalValue::UnnamedAddr::None || + GV.isExternallyInitialized() || + GV.getDLLStorageClass() != GlobalValue::DefaultStorageClass || + GV.hasComdat()) { + Vals.push_back(getEncodedVisibility(GV)); + Vals.push_back(getEncodedThreadLocalMode(GV)); + Vals.push_back(GV.getUnnamedAddr() != GlobalValue::UnnamedAddr::None); + Vals.push_back(GV.isExternallyInitialized()); + Vals.push_back(getEncodedDLLStorageClass(GV)); + Vals.push_back(GV.hasComdat() ? VE.getComdatID(GV.getComdat()) : 0); + } else { + AbbrevToUse = SimpleGVarAbbrev; + } + + Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals, AbbrevToUse); + Vals.clear(); + } + + // Emit the function proto information. 
+ for (const Function &F : M) { + // FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment, + // section, visibility, gc, unnamed_addr, prologuedata, + // dllstorageclass, comdat, prefixdata, personalityfn] + Vals.push_back(VE.getTypeID(F.getFunctionType())); + Vals.push_back(F.getCallingConv()); + Vals.push_back(F.isDeclaration()); + Vals.push_back(getEncodedLinkage(F)); + Vals.push_back(VE.getAttributeListID(F.getAttributes())); + Vals.push_back(getEncodedAlign(F.getAlign())); + Vals.push_back(F.hasSection() ? SectionMap[std::string(F.getSection())] + : 0); + Vals.push_back(getEncodedVisibility(F)); + Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0); + Vals.push_back(F.getUnnamedAddr() != GlobalValue::UnnamedAddr::None); + Vals.push_back( + F.hasPrologueData() ? (VE.getValueID(F.getPrologueData()) + 1) : 0); + Vals.push_back(getEncodedDLLStorageClass(F)); + Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0); + Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1) + : 0); + Vals.push_back( + F.hasPersonalityFn() ? (VE.getValueID(F.getPersonalityFn()) + 1) : 0); + + unsigned AbbrevToUse = 0; + Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse); + Vals.clear(); + } + + // Emit the alias information. + for (const GlobalAlias &A : M.aliases()) { + // ALIAS: [alias type, aliasee val#, linkage, visibility] + Vals.push_back(VE.getTypeID(A.getValueType())); + Vals.push_back(VE.getValueID(A.getAliasee())); + Vals.push_back(getEncodedLinkage(A)); + Vals.push_back(getEncodedVisibility(A)); + Vals.push_back(getEncodedDLLStorageClass(A)); + Vals.push_back(getEncodedThreadLocalMode(A)); + Vals.push_back(A.getUnnamedAddr() != GlobalValue::UnnamedAddr::None); + unsigned AbbrevToUse = 0; + Stream.EmitRecord(bitc::MODULE_CODE_ALIAS_OLD, Vals, AbbrevToUse); + Vals.clear(); + } +} + +void DXILBitcodeWriter::writeValueAsMetadata( + const ValueAsMetadata *MD, SmallVectorImpl &Record) { + // Mimic an MDNode with a value as one operand. + Value *V = MD->getValue(); + Record.push_back(VE.getTypeID(V->getType())); + Record.push_back(VE.getValueID(V)); + Stream.EmitRecord(bitc::METADATA_VALUE, Record, 0); + Record.clear(); +} + +void DXILBitcodeWriter::writeMDTuple(const MDTuple *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + Metadata *MD = N->getOperand(i); + assert(!(MD && isa(MD)) && + "Unexpected function-local metadata"); + Record.push_back(VE.getMetadataOrNullID(MD)); + } + Stream.EmitRecord(N->isDistinct() ? bitc::METADATA_DISTINCT_NODE + : bitc::METADATA_NODE, + Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDILocation(const DILocation *N, + SmallVectorImpl &Record, + unsigned &Abbrev) { + if (!Abbrev) + Abbrev = createDILocationAbbrev(); + Record.push_back(N->isDistinct()); + Record.push_back(N->getLine()); + Record.push_back(N->getColumn()); + Record.push_back(VE.getMetadataID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getInlinedAt())); + + Stream.EmitRecord(bitc::METADATA_LOCATION, Record, Abbrev); + Record.clear(); +} + +static uint64_t rotateSign(APInt Val) { + int64_t I = Val.getSExtValue(); + uint64_t U = I; + return I < 0 ? 
~(U << 1) : U << 1; +} + +static uint64_t rotateSign(DISubrange::BoundType Val) { + return rotateSign(Val.get()->getValue()); +} + +void DXILBitcodeWriter::writeDISubrange(const DISubrange *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back( + N->getCount().get()->getValue().getSExtValue()); + Record.push_back(rotateSign(N->getLowerBound())); + + Stream.EmitRecord(bitc::METADATA_SUBRANGE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIEnumerator(const DIEnumerator *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(rotateSign(N->getValue())); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + + Stream.EmitRecord(bitc::METADATA_ENUMERATOR, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIBasicType(const DIBasicType *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getTag()); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(N->getSizeInBits()); + Record.push_back(N->getAlignInBits()); + Record.push_back(N->getEncoding()); + + Stream.EmitRecord(bitc::METADATA_BASIC_TYPE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIDerivedType(const DIDerivedType *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getTag()); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(N->getLine()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getBaseType())); + Record.push_back(N->getSizeInBits()); + Record.push_back(N->getAlignInBits()); + Record.push_back(N->getOffsetInBits()); + Record.push_back(N->getFlags()); + Record.push_back(VE.getMetadataOrNullID(N->getExtraData())); + + Stream.EmitRecord(bitc::METADATA_DERIVED_TYPE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDICompositeType(const DICompositeType *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getTag()); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(N->getLine()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getBaseType())); + Record.push_back(N->getSizeInBits()); + Record.push_back(N->getAlignInBits()); + Record.push_back(N->getOffsetInBits()); + Record.push_back(N->getFlags()); + Record.push_back(VE.getMetadataOrNullID(N->getElements().get())); + Record.push_back(N->getRuntimeLang()); + Record.push_back(VE.getMetadataOrNullID(N->getVTableHolder())); + Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get())); + Record.push_back(VE.getMetadataOrNullID(N->getRawIdentifier())); + + Stream.EmitRecord(bitc::METADATA_COMPOSITE_TYPE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDISubroutineType(const DISubroutineType *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getFlags()); + Record.push_back(VE.getMetadataOrNullID(N->getTypeArray().get())); + + Stream.EmitRecord(bitc::METADATA_SUBROUTINE_TYPE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIFile(const DIFile *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + 
Record.push_back(N->isDistinct()); + Record.push_back(VE.getMetadataOrNullID(N->getRawFilename())); + Record.push_back(VE.getMetadataOrNullID(N->getRawDirectory())); + + Stream.EmitRecord(bitc::METADATA_FILE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDICompileUnit(const DICompileUnit *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getSourceLanguage()); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(VE.getMetadataOrNullID(N->getRawProducer())); + Record.push_back(N->isOptimized()); + Record.push_back(VE.getMetadataOrNullID(N->getRawFlags())); + Record.push_back(N->getRuntimeVersion()); + Record.push_back(VE.getMetadataOrNullID(N->getRawSplitDebugFilename())); + Record.push_back(N->getEmissionKind()); + Record.push_back(VE.getMetadataOrNullID(N->getEnumTypes().get())); + Record.push_back(VE.getMetadataOrNullID(N->getRetainedTypes().get())); + Record.push_back(/* subprograms */ 0); + Record.push_back(VE.getMetadataOrNullID(N->getGlobalVariables().get())); + Record.push_back(VE.getMetadataOrNullID(N->getImportedEntities().get())); + Record.push_back(N->getDWOId()); + + Stream.EmitRecord(bitc::METADATA_COMPILE_UNIT, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDISubprogram(const DISubprogram *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName())); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(N->getLine()); + Record.push_back(VE.getMetadataOrNullID(N->getType())); + Record.push_back(N->isLocalToUnit()); + Record.push_back(N->isDefinition()); + Record.push_back(N->getScopeLine()); + Record.push_back(VE.getMetadataOrNullID(N->getContainingType())); + Record.push_back(N->getVirtuality()); + Record.push_back(N->getVirtualIndex()); + Record.push_back(N->getFlags()); + Record.push_back(N->isOptimized()); + Record.push_back(VE.getMetadataOrNullID(N->getRawUnit())); + Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get())); + Record.push_back(VE.getMetadataOrNullID(N->getDeclaration())); + Record.push_back(VE.getMetadataOrNullID(N->getRetainedNodes().get())); + + Stream.EmitRecord(bitc::METADATA_SUBPROGRAM, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDILexicalBlock(const DILexicalBlock *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(N->getLine()); + Record.push_back(N->getColumn()); + + Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDILexicalBlockFile( + const DILexicalBlockFile *N, SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(N->getDiscriminator()); + + Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK_FILE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDINamespace(const DINamespace *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + 
Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(/* line number */ 0); + + Stream.EmitRecord(bitc::METADATA_NAMESPACE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIModule(const DIModule *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + for (auto &I : N->operands()) + Record.push_back(VE.getMetadataOrNullID(I)); + + Stream.EmitRecord(bitc::METADATA_MODULE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDITemplateTypeParameter( + const DITemplateTypeParameter *N, SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(VE.getMetadataOrNullID(N->getType())); + + Stream.EmitRecord(bitc::METADATA_TEMPLATE_TYPE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDITemplateValueParameter( + const DITemplateValueParameter *N, SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getTag()); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(VE.getMetadataOrNullID(N->getType())); + Record.push_back(VE.getMetadataOrNullID(N->getValue())); + + Stream.EmitRecord(bitc::METADATA_TEMPLATE_VALUE, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIGlobalVariable(const DIGlobalVariable *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName())); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(N->getLine()); + Record.push_back(VE.getMetadataOrNullID(N->getType())); + Record.push_back(N->isLocalToUnit()); + Record.push_back(N->isDefinition()); + Record.push_back(/* N->getRawVariable() */ 0); + Record.push_back(VE.getMetadataOrNullID(N->getStaticDataMemberDeclaration())); + + Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDILocalVariable(const DILocalVariable *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getTag()); + Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + Record.push_back(VE.getMetadataOrNullID(N->getFile())); + Record.push_back(N->getLine()); + Record.push_back(VE.getMetadataOrNullID(N->getType())); + Record.push_back(N->getArg()); + Record.push_back(N->getFlags()); + + Stream.EmitRecord(bitc::METADATA_LOCAL_VAR, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIExpression(const DIExpression *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.reserve(N->getElements().size() + 1); + + Record.push_back(N->isDistinct()); + Record.append(N->elements_begin(), N->elements_end()); + + Stream.EmitRecord(bitc::METADATA_EXPRESSION, Record, Abbrev); + Record.clear(); +} + +void DXILBitcodeWriter::writeDIObjCProperty(const DIObjCProperty *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + llvm_unreachable("DXIL does not support objc!!!"); +} + +void DXILBitcodeWriter::writeDIImportedEntity(const DIImportedEntity *N, + SmallVectorImpl &Record, + unsigned Abbrev) { + Record.push_back(N->isDistinct()); + Record.push_back(N->getTag()); + 
Record.push_back(VE.getMetadataOrNullID(N->getScope())); + Record.push_back(VE.getMetadataOrNullID(N->getEntity())); + Record.push_back(N->getLine()); + Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + + Stream.EmitRecord(bitc::METADATA_IMPORTED_ENTITY, Record, Abbrev); + Record.clear(); +} + +unsigned DXILBitcodeWriter::createDILocationAbbrev() { + // Abbrev for METADATA_LOCATION. + // + // Assume the column is usually under 128, and always output the inlined-at + // location (it's never more expensive than building an array size 1). + std::shared_ptr Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_LOCATION)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + return Stream.EmitAbbrev(std::move(Abbv)); +} + +unsigned DXILBitcodeWriter::createGenericDINodeAbbrev() { + // Abbrev for METADATA_GENERIC_DEBUG. + // + // Assume the column is usually under 128, and always output the inlined-at + // location (it's never more expensive than building an array size 1). + std::shared_ptr Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_GENERIC_DEBUG)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + return Stream.EmitAbbrev(std::move(Abbv)); +} + +void DXILBitcodeWriter::writeMetadataRecords(ArrayRef MDs, + SmallVectorImpl &Record, + std::vector *MDAbbrevs, + std::vector *IndexPos) { + if (MDs.empty()) + return; + + // Initialize MDNode abbreviations. +#define HANDLE_MDNODE_LEAF(CLASS) unsigned CLASS##Abbrev = 0; +#include "llvm/IR/Metadata.def" + + for (const Metadata *MD : MDs) { + if (IndexPos) + IndexPos->push_back(Stream.GetCurrentBitNo()); + if (const MDNode *N = dyn_cast(MD)) { + assert(N->isResolved() && "Expected forward references to be resolved"); + + switch (N->getMetadataID()) { + default: + llvm_unreachable("Invalid MDNode subclass"); +#define HANDLE_MDNODE_LEAF(CLASS) \ + case Metadata::CLASS##Kind: \ + if (MDAbbrevs) \ + write##CLASS(cast(N), Record, \ + (*MDAbbrevs)[MetadataAbbrev::CLASS##AbbrevID]); \ + else \ + write##CLASS(cast(N), Record, CLASS##Abbrev); \ + continue; +#include "llvm/IR/Metadata.def" + } + } + writeValueAsMetadata(cast(MD), Record); + } +} + +unsigned DXILBitcodeWriter::createMetadataStringsAbbrev() { + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRING_OLD)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); + return Stream.EmitAbbrev(std::move(Abbv)); +} + +void DXILBitcodeWriter::writeMetadataStrings( + ArrayRef Strings, SmallVectorImpl &Record) { + for (const Metadata *MD : Strings) { + const MDString *MDS = cast(MD); + // Code: [strchar x N] + Record.append(MDS->bytes_begin(), MDS->bytes_end()); + + // Emit the finished record. 
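+    // Note: each string is emitted as its own METADATA_STRING_OLD record,
+    // the encoding used by the LLVM 3.7-era bitcode that DXIL targets, rather
+    // than the bulk METADATA_STRINGS record of current LLVM bitcode.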
+ Stream.EmitRecord(bitc::METADATA_STRING_OLD, Record, + createMetadataStringsAbbrev()); + Record.clear(); + } +} + +void DXILBitcodeWriter::writeModuleMetadata() { + if (!VE.hasMDs() && M.named_metadata_empty()) + return; + + Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 5); + + // Emit all abbrevs upfront, so that the reader can jump in the middle of the + // block and load any metadata. + std::vector MDAbbrevs; + + MDAbbrevs.resize(MetadataAbbrev::LastPlusOne); + MDAbbrevs[MetadataAbbrev::DILocationAbbrevID] = createDILocationAbbrev(); + MDAbbrevs[MetadataAbbrev::GenericDINodeAbbrevID] = + createGenericDINodeAbbrev(); + + unsigned NameAbbrev = 0; + if (!M.named_metadata_empty()) { + // Abbrev for METADATA_NAME. + std::shared_ptr Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_NAME)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); + NameAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + } + + SmallVector Record; + writeMetadataStrings(VE.getMDStrings(), Record); + + std::vector IndexPos; + IndexPos.reserve(VE.getNonMDStrings().size()); + writeMetadataRecords(VE.getNonMDStrings(), Record, &MDAbbrevs, &IndexPos); + + // Write named metadata. + for (const NamedMDNode &NMD : M.named_metadata()) { + // Write name. + StringRef Str = NMD.getName(); + Record.append(Str.bytes_begin(), Str.bytes_end()); + Stream.EmitRecord(bitc::METADATA_NAME, Record, NameAbbrev); + Record.clear(); + + // Write named metadata operands. + for (const MDNode *N : NMD.operands()) + Record.push_back(VE.getMetadataID(N)); + Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0); + Record.clear(); + } + + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeFunctionMetadata(const Function &F) { + if (!VE.hasMDs()) + return; + + Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 4); + SmallVector Record; + writeMetadataStrings(VE.getMDStrings(), Record); + writeMetadataRecords(VE.getNonMDStrings(), Record); + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeFunctionMetadataAttachment(const Function &F) { + Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3); + + SmallVector Record; + + // Write metadata attachments + // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]] + SmallVector, 4> MDs; + F.getAllMetadata(MDs); + if (!MDs.empty()) { + for (const auto &I : MDs) { + Record.push_back(I.first); + Record.push_back(VE.getMetadataID(I.second)); + } + Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); + Record.clear(); + } + + for (const BasicBlock &BB : F) + for (const Instruction &I : BB) { + MDs.clear(); + I.getAllMetadataOtherThanDebugLoc(MDs); + + // If no metadata, ignore instruction. 
+ if (MDs.empty()) + continue; + + Record.push_back(VE.getInstructionID(&I)); + + for (unsigned i = 0, e = MDs.size(); i != e; ++i) { + Record.push_back(MDs[i].first); + Record.push_back(VE.getMetadataID(MDs[i].second)); + } + Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); + Record.clear(); + } + + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeModuleMetadataKinds() { + SmallVector Record; + + // Write metadata kinds + // METADATA_KIND - [n x [id, name]] + SmallVector Names; + M.getMDKindNames(Names); + + if (Names.empty()) + return; + + Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3); + + for (unsigned MDKindID = 0, e = Names.size(); MDKindID != e; ++MDKindID) { + Record.push_back(MDKindID); + StringRef KName = Names[MDKindID]; + Record.append(KName.begin(), KName.end()); + + Stream.EmitRecord(bitc::METADATA_KIND, Record, 0); + Record.clear(); + } + + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal, + bool isGlobal) { + if (FirstVal == LastVal) + return; + + Stream.EnterSubblock(bitc::CONSTANTS_BLOCK_ID, 4); + + unsigned AggregateAbbrev = 0; + unsigned String8Abbrev = 0; + unsigned CString7Abbrev = 0; + unsigned CString6Abbrev = 0; + // If this is a constant pool for the module, emit module-specific abbrevs. + if (isGlobal) { + // Abbrev for CST_CODE_AGGREGATE. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_AGGREGATE)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add( + BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(LastVal + 1))); + AggregateAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + + // Abbrev for CST_CODE_STRING. + Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_STRING)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); + String8Abbrev = Stream.EmitAbbrev(std::move(Abbv)); + // Abbrev for CST_CODE_CSTRING. + Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); + CString7Abbrev = Stream.EmitAbbrev(std::move(Abbv)); + // Abbrev for CST_CODE_CSTRING. + Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); + CString6Abbrev = Stream.EmitAbbrev(std::move(Abbv)); + } + + SmallVector Record; + + const ValueEnumerator::ValueList &Vals = VE.getValues(); + Type *LastTy = nullptr; + for (unsigned i = FirstVal; i != LastVal; ++i) { + const Value *V = Vals[i].first; + // If we need to switch types, do so now. + if (V->getType() != LastTy) { + LastTy = V->getType(); + Record.push_back(VE.getTypeID(LastTy)); + Stream.EmitRecord(bitc::CST_CODE_SETTYPE, Record, + CONSTANTS_SETTYPE_ABBREV); + Record.clear(); + } + + if (const InlineAsm *IA = dyn_cast(V)) { + Record.push_back(unsigned(IA->hasSideEffects()) | + unsigned(IA->isAlignStack()) << 1 | + unsigned(IA->getDialect() & 1) << 2); + + // Add the asm string. + const std::string &AsmStr = IA->getAsmString(); + Record.push_back(AsmStr.size()); + Record.append(AsmStr.begin(), AsmStr.end()); + + // Add the constraint string. 
+ const std::string &ConstraintStr = IA->getConstraintString(); + Record.push_back(ConstraintStr.size()); + Record.append(ConstraintStr.begin(), ConstraintStr.end()); + Stream.EmitRecord(bitc::CST_CODE_INLINEASM, Record); + Record.clear(); + continue; + } + const Constant *C = cast(V); + unsigned Code = -1U; + unsigned AbbrevToUse = 0; + if (C->isNullValue()) { + Code = bitc::CST_CODE_NULL; + } else if (isa(C)) { + Code = bitc::CST_CODE_UNDEF; + } else if (const ConstantInt *IV = dyn_cast(C)) { + if (IV->getBitWidth() <= 64) { + uint64_t V = IV->getSExtValue(); + emitSignedInt64(Record, V); + Code = bitc::CST_CODE_INTEGER; + AbbrevToUse = CONSTANTS_INTEGER_ABBREV; + } else { // Wide integers, > 64 bits in size. + // We have an arbitrary precision integer value to write whose + // bit width is > 64. However, in canonical unsigned integer + // format it is likely that the high bits are going to be zero. + // So, we only write the number of active words. + unsigned NWords = IV->getValue().getActiveWords(); + const uint64_t *RawWords = IV->getValue().getRawData(); + for (unsigned i = 0; i != NWords; ++i) { + emitSignedInt64(Record, RawWords[i]); + } + Code = bitc::CST_CODE_WIDE_INTEGER; + } + } else if (const ConstantFP *CFP = dyn_cast(C)) { + Code = bitc::CST_CODE_FLOAT; + Type *Ty = CFP->getType(); + if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) { + Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue()); + } else if (Ty->isX86_FP80Ty()) { + // api needed to prevent premature destruction + // bits are not in the same order as a normal i80 APInt, compensate. + APInt api = CFP->getValueAPF().bitcastToAPInt(); + const uint64_t *p = api.getRawData(); + Record.push_back((p[1] << 48) | (p[0] >> 16)); + Record.push_back(p[0] & 0xffffLL); + } else if (Ty->isFP128Ty() || Ty->isPPC_FP128Ty()) { + APInt api = CFP->getValueAPF().bitcastToAPInt(); + const uint64_t *p = api.getRawData(); + Record.push_back(p[0]); + Record.push_back(p[1]); + } else { + assert(0 && "Unknown FP type!"); + } + } else if (isa(C) && + cast(C)->isString()) { + const ConstantDataSequential *Str = cast(C); + // Emit constant strings specially. + unsigned NumElts = Str->getNumElements(); + // If this is a null-terminated string, use the denser CSTRING encoding. + if (Str->isCString()) { + Code = bitc::CST_CODE_CSTRING; + --NumElts; // Don't encode the null, which isn't allowed by char6. 
+ } else { + Code = bitc::CST_CODE_STRING; + AbbrevToUse = String8Abbrev; + } + bool isCStr7 = Code == bitc::CST_CODE_CSTRING; + bool isCStrChar6 = Code == bitc::CST_CODE_CSTRING; + for (unsigned i = 0; i != NumElts; ++i) { + unsigned char V = Str->getElementAsInteger(i); + Record.push_back(V); + isCStr7 &= (V & 128) == 0; + if (isCStrChar6) + isCStrChar6 = BitCodeAbbrevOp::isChar6(V); + } + + if (isCStrChar6) + AbbrevToUse = CString6Abbrev; + else if (isCStr7) + AbbrevToUse = CString7Abbrev; + } else if (const ConstantDataSequential *CDS = + dyn_cast(C)) { + Code = bitc::CST_CODE_DATA; + Type *EltTy = CDS->getType()->getArrayElementType(); + if (isa(EltTy)) { + for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) + Record.push_back(CDS->getElementAsInteger(i)); + } else if (EltTy->isFloatTy()) { + for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { + union { + float F; + uint32_t I; + }; + F = CDS->getElementAsFloat(i); + Record.push_back(I); + } + } else { + assert(EltTy->isDoubleTy() && "Unknown ConstantData element type"); + for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { + union { + double F; + uint64_t I; + }; + F = CDS->getElementAsDouble(i); + Record.push_back(I); + } + } + } else if (isa(C) || isa(C) || + isa(C)) { + Code = bitc::CST_CODE_AGGREGATE; + for (const Value *Op : C->operands()) + Record.push_back(VE.getValueID(Op)); + AbbrevToUse = AggregateAbbrev; + } else if (const ConstantExpr *CE = dyn_cast(C)) { + switch (CE->getOpcode()) { + default: + if (Instruction::isCast(CE->getOpcode())) { + Code = bitc::CST_CODE_CE_CAST; + Record.push_back(getEncodedCastOpcode(CE->getOpcode())); + Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); + Record.push_back(VE.getValueID(C->getOperand(0))); + AbbrevToUse = CONSTANTS_CE_CAST_Abbrev; + } else { + assert(CE->getNumOperands() == 2 && "Unknown constant expr!"); + Code = bitc::CST_CODE_CE_BINOP; + Record.push_back(getEncodedBinaryOpcode(CE->getOpcode())); + Record.push_back(VE.getValueID(C->getOperand(0))); + Record.push_back(VE.getValueID(C->getOperand(1))); + uint64_t Flags = getOptimizationFlags(CE); + if (Flags != 0) + Record.push_back(Flags); + } + break; + case Instruction::GetElementPtr: { + Code = bitc::CST_CODE_CE_GEP; + const auto *GO = cast(C); + if (GO->isInBounds()) + Code = bitc::CST_CODE_CE_INBOUNDS_GEP; + Record.push_back(VE.getTypeID(GO->getSourceElementType())); + for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) { + Record.push_back(VE.getTypeID(C->getOperand(i)->getType())); + Record.push_back(VE.getValueID(C->getOperand(i))); + } + break; + } + case Instruction::Select: + Code = bitc::CST_CODE_CE_SELECT; + Record.push_back(VE.getValueID(C->getOperand(0))); + Record.push_back(VE.getValueID(C->getOperand(1))); + Record.push_back(VE.getValueID(C->getOperand(2))); + break; + case Instruction::ExtractElement: + Code = bitc::CST_CODE_CE_EXTRACTELT; + Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); + Record.push_back(VE.getValueID(C->getOperand(0))); + Record.push_back(VE.getTypeID(C->getOperand(1)->getType())); + Record.push_back(VE.getValueID(C->getOperand(1))); + break; + case Instruction::InsertElement: + Code = bitc::CST_CODE_CE_INSERTELT; + Record.push_back(VE.getValueID(C->getOperand(0))); + Record.push_back(VE.getValueID(C->getOperand(1))); + Record.push_back(VE.getTypeID(C->getOperand(2)->getType())); + Record.push_back(VE.getValueID(C->getOperand(2))); + break; + case Instruction::ShuffleVector: + // If the return type and argument types are the 
same, this is a
+        // standard shufflevector instruction. If the types are different,
+        // then the shuffle is widening or truncating the input vectors, and
+        // the argument type must also be encoded.
+        if (C->getType() == C->getOperand(0)->getType()) {
+          Code = bitc::CST_CODE_CE_SHUFFLEVEC;
+        } else {
+          Code = bitc::CST_CODE_CE_SHUFVEC_EX;
+          Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
+        }
+        Record.push_back(VE.getValueID(C->getOperand(0)));
+        Record.push_back(VE.getValueID(C->getOperand(1)));
+        Record.push_back(VE.getValueID(C->getOperand(2)));
+        break;
+      case Instruction::ICmp:
+      case Instruction::FCmp:
+        Code = bitc::CST_CODE_CE_CMP;
+        Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
+        Record.push_back(VE.getValueID(C->getOperand(0)));
+        Record.push_back(VE.getValueID(C->getOperand(1)));
+        Record.push_back(CE->getPredicate());
+        break;
+      }
+    } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
+      Code = bitc::CST_CODE_BLOCKADDRESS;
+      Record.push_back(VE.getTypeID(BA->getFunction()->getType()));
+      Record.push_back(VE.getValueID(BA->getFunction()));
+      Record.push_back(VE.getGlobalBasicBlockID(BA->getBasicBlock()));
+    } else {
+#ifndef NDEBUG
+      C->dump();
+#endif
+      llvm_unreachable("Unknown constant!");
+    }
+    Stream.EmitRecord(Code, Record, AbbrevToUse);
+    Record.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
+void DXILBitcodeWriter::writeModuleConstants() {
+  const ValueEnumerator::ValueList &Vals = VE.getValues();
+
+  // Find the first constant to emit, which is the first non-globalvalue value.
+  // We know globalvalues have been emitted by WriteModuleInfo.
+  for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+    if (!isa<GlobalValue>(Vals[i].first)) {
+      writeConstants(i, Vals.size(), true);
+      return;
+    }
+  }
+}
+
+/// pushValueAndType - The file has to encode both the value and type id for
+/// many values, because we need to know what type to create for forward
+/// references. However, most operands are not forward references, so this type
+/// field is not needed.
+///
+/// This function adds V's value ID to Vals. If the value ID is higher than the
+/// instruction ID, then it is a forward reference, and it also includes the
+/// type ID. The value ID that is written is encoded relative to the InstID.
+bool DXILBitcodeWriter::pushValueAndType(const Value *V, unsigned InstID,
+                                         SmallVectorImpl<uint64_t> &Vals) {
+  unsigned ValID = VE.getValueID(V);
+  // Make encoding relative to the InstID.
+  Vals.push_back(InstID - ValID);
+  if (ValID >= InstID) {
+    Vals.push_back(VE.getTypeID(V->getType()));
+    return true;
+  }
+  return false;
+}
+
+/// pushValue - Like pushValueAndType, but where the type of the value is
+/// omitted (perhaps it was already encoded in an earlier operand).
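+///
+/// For example, if InstID is 12 and the operand was assigned value ID 9, the
+/// record stores 3. An operand that is only defined later (a forward
+/// reference) has ValID >= InstID, which is how pushValueAndType decides that
+/// the type ID must be emitted as well.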
+void DXILBitcodeWriter::pushValue(const Value *V, unsigned InstID, + SmallVectorImpl &Vals) { + unsigned ValID = VE.getValueID(V); + Vals.push_back(InstID - ValID); +} + +void DXILBitcodeWriter::pushValueSigned(const Value *V, unsigned InstID, + SmallVectorImpl &Vals) { + unsigned ValID = VE.getValueID(V); + int64_t diff = ((int32_t)InstID - (int32_t)ValID); + emitSignedInt64(Vals, diff); +} + +/// WriteInstruction - Emit an instruction +void DXILBitcodeWriter::writeInstruction(const Instruction &I, unsigned InstID, + SmallVectorImpl &Vals) { + unsigned Code = 0; + unsigned AbbrevToUse = 0; + VE.setInstructionID(&I); + switch (I.getOpcode()) { + default: + if (Instruction::isCast(I.getOpcode())) { + Code = bitc::FUNC_CODE_INST_CAST; + if (!pushValueAndType(I.getOperand(0), InstID, Vals)) + AbbrevToUse = (unsigned)FUNCTION_INST_CAST_ABBREV; + Vals.push_back(VE.getTypeID(I.getType())); + Vals.push_back(getEncodedCastOpcode(I.getOpcode())); + } else { + assert(isa(I) && "Unknown instruction!"); + Code = bitc::FUNC_CODE_INST_BINOP; + if (!pushValueAndType(I.getOperand(0), InstID, Vals)) + AbbrevToUse = (unsigned)FUNCTION_INST_BINOP_ABBREV; + pushValue(I.getOperand(1), InstID, Vals); + Vals.push_back(getEncodedBinaryOpcode(I.getOpcode())); + uint64_t Flags = getOptimizationFlags(&I); + if (Flags != 0) { + if (AbbrevToUse == (unsigned)FUNCTION_INST_BINOP_ABBREV) + AbbrevToUse = (unsigned)FUNCTION_INST_BINOP_FLAGS_ABBREV; + Vals.push_back(Flags); + } + } + break; + + case Instruction::GetElementPtr: { + Code = bitc::FUNC_CODE_INST_GEP; + AbbrevToUse = (unsigned)FUNCTION_INST_GEP_ABBREV; + auto &GEPInst = cast(I); + Vals.push_back(GEPInst.isInBounds()); + Vals.push_back(VE.getTypeID(GEPInst.getSourceElementType())); + for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) + pushValueAndType(I.getOperand(i), InstID, Vals); + break; + } + case Instruction::ExtractValue: { + Code = bitc::FUNC_CODE_INST_EXTRACTVAL; + pushValueAndType(I.getOperand(0), InstID, Vals); + const ExtractValueInst *EVI = cast(&I); + Vals.append(EVI->idx_begin(), EVI->idx_end()); + break; + } + case Instruction::InsertValue: { + Code = bitc::FUNC_CODE_INST_INSERTVAL; + pushValueAndType(I.getOperand(0), InstID, Vals); + pushValueAndType(I.getOperand(1), InstID, Vals); + const InsertValueInst *IVI = cast(&I); + Vals.append(IVI->idx_begin(), IVI->idx_end()); + break; + } + case Instruction::Select: + Code = bitc::FUNC_CODE_INST_VSELECT; + pushValueAndType(I.getOperand(1), InstID, Vals); + pushValue(I.getOperand(2), InstID, Vals); + pushValueAndType(I.getOperand(0), InstID, Vals); + break; + case Instruction::ExtractElement: + Code = bitc::FUNC_CODE_INST_EXTRACTELT; + pushValueAndType(I.getOperand(0), InstID, Vals); + pushValueAndType(I.getOperand(1), InstID, Vals); + break; + case Instruction::InsertElement: + Code = bitc::FUNC_CODE_INST_INSERTELT; + pushValueAndType(I.getOperand(0), InstID, Vals); + pushValue(I.getOperand(1), InstID, Vals); + pushValueAndType(I.getOperand(2), InstID, Vals); + break; + case Instruction::ShuffleVector: + Code = bitc::FUNC_CODE_INST_SHUFFLEVEC; + pushValueAndType(I.getOperand(0), InstID, Vals); + pushValue(I.getOperand(1), InstID, Vals); + pushValue(I.getOperand(2), InstID, Vals); + break; + case Instruction::ICmp: + case Instruction::FCmp: { + // compare returning Int1Ty or vector of Int1Ty + Code = bitc::FUNC_CODE_INST_CMP2; + pushValueAndType(I.getOperand(0), InstID, Vals); + pushValue(I.getOperand(1), InstID, Vals); + Vals.push_back(cast(I).getPredicate()); + uint64_t Flags = 
getOptimizationFlags(&I); + if (Flags != 0) + Vals.push_back(Flags); + break; + } + + case Instruction::Ret: { + Code = bitc::FUNC_CODE_INST_RET; + unsigned NumOperands = I.getNumOperands(); + if (NumOperands == 0) + AbbrevToUse = (unsigned)FUNCTION_INST_RET_VOID_ABBREV; + else if (NumOperands == 1) { + if (!pushValueAndType(I.getOperand(0), InstID, Vals)) + AbbrevToUse = (unsigned)FUNCTION_INST_RET_VAL_ABBREV; + } else { + for (unsigned i = 0, e = NumOperands; i != e; ++i) + pushValueAndType(I.getOperand(i), InstID, Vals); + } + } break; + case Instruction::Br: { + Code = bitc::FUNC_CODE_INST_BR; + const BranchInst &II = cast(I); + Vals.push_back(VE.getValueID(II.getSuccessor(0))); + if (II.isConditional()) { + Vals.push_back(VE.getValueID(II.getSuccessor(1))); + pushValue(II.getCondition(), InstID, Vals); + } + } break; + case Instruction::Switch: { + Code = bitc::FUNC_CODE_INST_SWITCH; + const SwitchInst &SI = cast(I); + Vals.push_back(VE.getTypeID(SI.getCondition()->getType())); + pushValue(SI.getCondition(), InstID, Vals); + Vals.push_back(VE.getValueID(SI.getDefaultDest())); + for (auto Case : SI.cases()) { + Vals.push_back(VE.getValueID(Case.getCaseValue())); + Vals.push_back(VE.getValueID(Case.getCaseSuccessor())); + } + } break; + case Instruction::IndirectBr: + Code = bitc::FUNC_CODE_INST_INDIRECTBR; + Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); + // Encode the address operand as relative, but not the basic blocks. + pushValue(I.getOperand(0), InstID, Vals); + for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) + Vals.push_back(VE.getValueID(I.getOperand(i))); + break; + + case Instruction::Invoke: { + const InvokeInst *II = cast(&I); + const Value *Callee = II->getCalledOperand(); + FunctionType *FTy = II->getFunctionType(); + Code = bitc::FUNC_CODE_INST_INVOKE; + + Vals.push_back(VE.getAttributeListID(II->getAttributes())); + Vals.push_back(II->getCallingConv() | 1 << 13); + Vals.push_back(VE.getValueID(II->getNormalDest())); + Vals.push_back(VE.getValueID(II->getUnwindDest())); + Vals.push_back(VE.getTypeID(FTy)); + pushValueAndType(Callee, InstID, Vals); + + // Emit value #'s for the fixed parameters. + for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) + pushValue(I.getOperand(i), InstID, Vals); // fixed param. + + // Emit type/value pairs for varargs params. + if (FTy->isVarArg()) { + for (unsigned i = FTy->getNumParams(), e = I.getNumOperands() - 3; i != e; + ++i) + pushValueAndType(I.getOperand(i), InstID, Vals); // vararg + } + break; + } + case Instruction::Resume: + Code = bitc::FUNC_CODE_INST_RESUME; + pushValueAndType(I.getOperand(0), InstID, Vals); + break; + case Instruction::Unreachable: + Code = bitc::FUNC_CODE_INST_UNREACHABLE; + AbbrevToUse = (unsigned)FUNCTION_INST_UNREACHABLE_ABBREV; + break; + + case Instruction::PHI: { + const PHINode &PN = cast(I); + Code = bitc::FUNC_CODE_INST_PHI; + // With the newer instruction encoding, forward references could give + // negative valued IDs. This is most common for PHIs, so we use + // signed VBRs. + SmallVector Vals64; + Vals64.push_back(VE.getTypeID(PN.getType())); + for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) { + pushValueSigned(PN.getIncomingValue(i), InstID, Vals64); + Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i))); + } + // Emit a Vals64 vector and exit. 
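+    // (Signed VBRs keep these small negative deltas cheap; the unsigned
+    // relative encoding used for other operands would wrap a forward
+    // reference into a very large value.)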
+ Stream.EmitRecord(Code, Vals64, AbbrevToUse); + Vals64.clear(); + return; + } + + case Instruction::LandingPad: { + const LandingPadInst &LP = cast(I); + Code = bitc::FUNC_CODE_INST_LANDINGPAD; + Vals.push_back(VE.getTypeID(LP.getType())); + Vals.push_back(LP.isCleanup()); + Vals.push_back(LP.getNumClauses()); + for (unsigned I = 0, E = LP.getNumClauses(); I != E; ++I) { + if (LP.isCatch(I)) + Vals.push_back(LandingPadInst::Catch); + else + Vals.push_back(LandingPadInst::Filter); + pushValueAndType(LP.getClause(I), InstID, Vals); + } + break; + } + + case Instruction::Alloca: { + Code = bitc::FUNC_CODE_INST_ALLOCA; + const AllocaInst &AI = cast(I); + Vals.push_back(VE.getTypeID(AI.getAllocatedType())); + Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); + Vals.push_back(VE.getValueID(I.getOperand(0))); // size. + using APV = AllocaPackedValues; + unsigned Record = 0; + unsigned EncodedAlign = getEncodedAlign(AI.getAlign()); + Bitfield::set( + Record, EncodedAlign & ((1 << APV::AlignLower::Bits) - 1)); + Bitfield::set(Record, + EncodedAlign >> APV::AlignLower::Bits); + Bitfield::set(Record, AI.isUsedWithInAlloca()); + Vals.push_back(Record); + break; + } + + case Instruction::Load: + if (cast(I).isAtomic()) { + Code = bitc::FUNC_CODE_INST_LOADATOMIC; + pushValueAndType(I.getOperand(0), InstID, Vals); + } else { + Code = bitc::FUNC_CODE_INST_LOAD; + if (!pushValueAndType(I.getOperand(0), InstID, Vals)) // ptr + AbbrevToUse = (unsigned)FUNCTION_INST_LOAD_ABBREV; + } + Vals.push_back(VE.getTypeID(I.getType())); + Vals.push_back(Log2_32(cast(I).getAlignment()) + 1); + Vals.push_back(cast(I).isVolatile()); + if (cast(I).isAtomic()) { + Vals.push_back(getEncodedOrdering(cast(I).getOrdering())); + Vals.push_back(getEncodedSyncScopeID(cast(I).getSyncScopeID())); + } + break; + case Instruction::Store: + if (cast(I).isAtomic()) + Code = bitc::FUNC_CODE_INST_STOREATOMIC; + else + Code = bitc::FUNC_CODE_INST_STORE; + pushValueAndType(I.getOperand(1), InstID, Vals); // ptrty + ptr + pushValueAndType(I.getOperand(0), InstID, Vals); // valty + val + Vals.push_back(Log2_32(cast(I).getAlignment()) + 1); + Vals.push_back(cast(I).isVolatile()); + if (cast(I).isAtomic()) { + Vals.push_back(getEncodedOrdering(cast(I).getOrdering())); + Vals.push_back( + getEncodedSyncScopeID(cast(I).getSyncScopeID())); + } + break; + case Instruction::AtomicCmpXchg: + Code = bitc::FUNC_CODE_INST_CMPXCHG; + pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr + pushValueAndType(I.getOperand(1), InstID, Vals); // cmp. + pushValue(I.getOperand(2), InstID, Vals); // newval. + Vals.push_back(cast(I).isVolatile()); + Vals.push_back( + getEncodedOrdering(cast(I).getSuccessOrdering())); + Vals.push_back( + getEncodedSyncScopeID(cast(I).getSyncScopeID())); + Vals.push_back( + getEncodedOrdering(cast(I).getFailureOrdering())); + Vals.push_back(cast(I).isWeak()); + break; + case Instruction::AtomicRMW: + Code = bitc::FUNC_CODE_INST_ATOMICRMW; + pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr + pushValue(I.getOperand(1), InstID, Vals); // val. 
+ Vals.push_back( + getEncodedRMWOperation(cast(I).getOperation())); + Vals.push_back(cast(I).isVolatile()); + Vals.push_back(getEncodedOrdering(cast(I).getOrdering())); + Vals.push_back( + getEncodedSyncScopeID(cast(I).getSyncScopeID())); + break; + case Instruction::Fence: + Code = bitc::FUNC_CODE_INST_FENCE; + Vals.push_back(getEncodedOrdering(cast(I).getOrdering())); + Vals.push_back(getEncodedSyncScopeID(cast(I).getSyncScopeID())); + break; + case Instruction::Call: { + const CallInst &CI = cast(I); + FunctionType *FTy = CI.getFunctionType(); + + Code = bitc::FUNC_CODE_INST_CALL; + + Vals.push_back(VE.getAttributeListID(CI.getAttributes())); + Vals.push_back((CI.getCallingConv() << 1) | unsigned(CI.isTailCall()) | + unsigned(CI.isMustTailCall()) << 14 | 1 << 15); + Vals.push_back(VE.getTypeID(FTy)); + pushValueAndType(CI.getCalledOperand(), InstID, Vals); // Callee + + // Emit value #'s for the fixed parameters. + for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { + // Check for labels (can happen with asm labels). + if (FTy->getParamType(i)->isLabelTy()) + Vals.push_back(VE.getValueID(CI.getArgOperand(i))); + else + pushValue(CI.getArgOperand(i), InstID, Vals); // fixed param. + } + + // Emit type/value pairs for varargs params. + if (FTy->isVarArg()) { + for (unsigned i = FTy->getNumParams(), e = CI.arg_size(); i != e; ++i) + pushValueAndType(CI.getArgOperand(i), InstID, Vals); // varargs + } + break; + } + case Instruction::VAArg: + Code = bitc::FUNC_CODE_INST_VAARG; + Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // valistty + pushValue(I.getOperand(0), InstID, Vals); // valist. + Vals.push_back(VE.getTypeID(I.getType())); // restype. + break; + } + + Stream.EmitRecord(Code, Vals, AbbrevToUse); + Vals.clear(); +} + +// Emit names for globals/functions etc. +void DXILBitcodeWriter::writeFunctionLevelValueSymbolTable( + const ValueSymbolTable &VST) { + if (VST.empty()) + return; + Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4); + + SmallVector NameVals; + + // HLSL Change + // Read the named values from a sorted list instead of the original list + // to ensure the binary is the same no matter what values ever existed. + SmallVector SortedTable; + + for (auto &VI : VST) { + SortedTable.push_back(VI.second->getValueName()); + } + // The keys are unique, so there shouldn't be stability issues. + std::sort(SortedTable.begin(), SortedTable.end(), + [](const ValueName *A, const ValueName *B) { + return A->first() < B->first(); + }); + + for (const ValueName *SI : SortedTable) { + auto &Name = *SI; + + // Figure out the encoding to use for the name. + bool is7Bit = true; + bool isChar6 = true; + for (const char *C = Name.getKeyData(), *E = C + Name.getKeyLength(); + C != E; ++C) { + if (isChar6) + isChar6 = BitCodeAbbrevOp::isChar6(*C); + if ((unsigned char)*C & 128) { + is7Bit = false; + break; // don't bother scanning the rest. 
+ } + } + + unsigned AbbrevToUse = VST_ENTRY_8_ABBREV; + + // VST_ENTRY: [valueid, namechar x N] + // VST_BBENTRY: [bbid, namechar x N] + unsigned Code; + if (isa(SI->getValue())) { + Code = bitc::VST_CODE_BBENTRY; + if (isChar6) + AbbrevToUse = VST_BBENTRY_6_ABBREV; + } else { + Code = bitc::VST_CODE_ENTRY; + if (isChar6) + AbbrevToUse = VST_ENTRY_6_ABBREV; + else if (is7Bit) + AbbrevToUse = VST_ENTRY_7_ABBREV; + } + + NameVals.push_back(VE.getValueID(SI->getValue())); + for (const char *P = Name.getKeyData(), + *E = Name.getKeyData() + Name.getKeyLength(); + P != E; ++P) + NameVals.push_back((unsigned char)*P); + + // Emit the finished record. + Stream.EmitRecord(Code, NameVals, AbbrevToUse); + NameVals.clear(); + } + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeUseList(UseListOrder &&Order) { + assert(Order.Shuffle.size() >= 2 && "Shuffle too small"); + unsigned Code; + if (isa(Order.V)) + Code = bitc::USELIST_CODE_BB; + else + Code = bitc::USELIST_CODE_DEFAULT; + + SmallVector Record(Order.Shuffle.begin(), Order.Shuffle.end()); + Record.push_back(VE.getValueID(Order.V)); + Stream.EmitRecord(Code, Record); +} + +void DXILBitcodeWriter::writeUseListBlock(const Function *F) { + assert(VE.shouldPreserveUseListOrder() && + "Expected to be preserving use-list order"); + + auto hasMore = [&]() { + return !VE.UseListOrders.empty() && VE.UseListOrders.back().F == F; + }; + if (!hasMore()) + // Nothing to do. + return; + + Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3); + while (hasMore()) { + writeUseList(std::move(VE.UseListOrders.back())); + VE.UseListOrders.pop_back(); + } + Stream.ExitBlock(); +} + +/// Emit a function body to the module stream. +void DXILBitcodeWriter::writeFunction(const Function &F) { + Stream.EnterSubblock(bitc::FUNCTION_BLOCK_ID, 4); + VE.incorporateFunction(F); + + SmallVector Vals; + + // Emit the number of basic blocks, so the reader can create them ahead of + // time. + Vals.push_back(VE.getBasicBlocks().size()); + Stream.EmitRecord(bitc::FUNC_CODE_DECLAREBLOCKS, Vals); + Vals.clear(); + + // If there are function-local constants, emit them now. + unsigned CstStart, CstEnd; + VE.getFunctionConstantRange(CstStart, CstEnd); + writeConstants(CstStart, CstEnd, false); + + // If there is function-local metadata, emit it now. + writeFunctionMetadata(F); + + // Keep a running idea of what the instruction ID is. + unsigned InstID = CstEnd; + + bool NeedsMetadataAttachment = F.hasMetadata(); + + DILocation *LastDL = nullptr; + + // Finally, emit all the instructions, in order. + for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) + for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; + ++I) { + writeInstruction(*I, InstID, Vals); + + if (!I->getType()->isVoidTy()) + ++InstID; + + // If the instruction has metadata, write a metadata attachment later. + NeedsMetadataAttachment |= I->hasMetadataOtherThanDebugLoc(); + + // If the instruction has a debug location, emit it. + DILocation *DL = I->getDebugLoc(); + if (!DL) + continue; + + if (DL == LastDL) { + // Just repeat the same debug loc as last time. + Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC_AGAIN, Vals); + continue; + } + + Vals.push_back(DL->getLine()); + Vals.push_back(DL->getColumn()); + Vals.push_back(VE.getMetadataOrNullID(DL->getScope())); + Vals.push_back(VE.getMetadataOrNullID(DL->getInlinedAt())); + Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC, Vals); + Vals.clear(); + + LastDL = DL; + } + + // Emit names for all the instructions etc. 
+ if (auto *Symtab = F.getValueSymbolTable()) + writeFunctionLevelValueSymbolTable(*Symtab); + + if (NeedsMetadataAttachment) + writeFunctionMetadataAttachment(F); + if (VE.shouldPreserveUseListOrder()) + writeUseListBlock(&F); + VE.purgeFunction(); + Stream.ExitBlock(); +} + +// Emit blockinfo, which defines the standard abbreviations etc. +void DXILBitcodeWriter::writeBlockInfo() { + // We only want to emit block info records for blocks that have multiple + // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. + // Other blocks can define their abbrevs inline. + Stream.EnterBlockInfoBlock(); + + { // 8-bit fixed-width VST_ENTRY/VST_BBENTRY strings. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); + if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, + std::move(Abbv)) != VST_ENTRY_8_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + + { // 7-bit fixed width VST_ENTRY strings. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); + if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, + std::move(Abbv)) != VST_ENTRY_7_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { // 6-bit char6 VST_ENTRY strings. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); + if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, + std::move(Abbv)) != VST_ENTRY_6_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { // 6-bit char6 VST_BBENTRY strings. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_BBENTRY)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); + if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, + std::move(Abbv)) != VST_BBENTRY_6_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + + { // SETTYPE abbrev for CONSTANTS_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, + VE.computeBitsRequiredForTypeIndicies())); + if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, std::move(Abbv)) != + CONSTANTS_SETTYPE_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + + { // INTEGER abbrev for CONSTANTS_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_INTEGER)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); + if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, std::move(Abbv)) != + CONSTANTS_INTEGER_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + + { // CE_CAST abbrev for CONSTANTS_BLOCK. 
+ auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // cast opc + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // typeid + VE.computeBitsRequiredForTypeIndicies())); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id + + if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, std::move(Abbv)) != + CONSTANTS_CE_CAST_Abbrev) + assert(false && "Unexpected abbrev ordering!"); + } + { // NULL abbrev for CONSTANTS_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_NULL)); + if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, std::move(Abbv)) != + CONSTANTS_NULL_Abbrev) + assert(false && "Unexpected abbrev ordering!"); + } + + // FIXME: This should only use space for first class types! + + { // INST_LOAD abbrev for FUNCTION_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty + VE.computeBitsRequiredForTypeIndicies())); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_LOAD_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { // INST_BINOP abbrev for FUNCTION_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_BINOP_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { // INST_BINOP_FLAGS abbrev for FUNCTION_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); // flags + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_BINOP_FLAGS_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { // INST_CAST abbrev for FUNCTION_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpVal + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty + VE.computeBitsRequiredForTypeIndicies())); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_CAST_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + + { // INST_RET abbrev for FUNCTION_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET)); + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_RET_VOID_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { // INST_RET abbrev for FUNCTION_BLOCK. 
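+    // This second RET abbrev covers `ret` with a single operand, encoded as
+    // one relative value ID; the no-operand (void return) form uses the
+    // abbrev defined just above.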
+ auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ValID + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_RET_VAL_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { // INST_UNREACHABLE abbrev for FUNCTION_BLOCK. + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNREACHABLE)); + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_UNREACHABLE_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + { + auto Abbv = std::make_shared(); + Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_GEP)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty + Log2_32_Ceil(VE.getTypes().size() + 1))); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); + Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); + if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, std::move(Abbv)) != + (unsigned)FUNCTION_INST_GEP_ABBREV) + assert(false && "Unexpected abbrev ordering!"); + } + + Stream.ExitBlock(); +} + +void DXILBitcodeWriter::writeModuleVersion() { + // VERSION: [version#] + Stream.EmitRecord(bitc::MODULE_CODE_VERSION, ArrayRef{1}); +} + +/// WriteModule - Emit the specified module to the bitstream. +void DXILBitcodeWriter::write() { + // The identification block is new since llvm-3.7, but the old bitcode reader + // will skip it. + // writeIdentificationBlock(Stream); + + Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); + + // It is redundant to fully-specify this here, but nice to make it explicit + // so that it is clear the DXIL module version is different. + DXILBitcodeWriter::writeModuleVersion(); + + // Emit blockinfo, which defines the standard abbreviations etc. + writeBlockInfo(); + + // Emit information about attribute groups. + writeAttributeGroupTable(); + + // Emit information about parameter attributes. + writeAttributeTable(); + + // Emit information describing all of the types in the module. + writeTypeTable(); + + writeComdats(); + + // Emit top-level description of module, including target triple, inline asm, + // descriptors for global variables, and function prototype info. + writeModuleInfo(); + + // Emit constants. + writeModuleConstants(); + + // Emit metadata. + writeModuleMetadataKinds(); + + // Emit metadata. + writeModuleMetadata(); + + // Emit names for globals/functions etc. + // DXIL uses the same format for module-level value symbol table as for the + // function level table. + writeFunctionLevelValueSymbolTable(M.getValueSymbolTable()); + + // Emit module-level use-lists. + if (VE.shouldPreserveUseListOrder()) + writeUseListBlock(nullptr); + + // Emit function bodies. + for (const Function &F : M) + if (!F.isDeclaration()) + writeFunction(F); + + Stream.ExitBlock(); +} diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.h b/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.h @@ -0,0 +1,312 @@ +//===- DirectX/DXILWriter/ValueEnumerator.h - Number values -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class gives values and types Unique ID's. +// Forked from lib/Bitcode/Writer +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DXILWRITER_VALUEENUMERATOR_H +#define LLVM_DXILWRITER_VALUEENUMERATOR_H + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/UniqueVector.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/UseListOrder.h" +#include +#include +#include +#include + +namespace llvm { + +class BasicBlock; +class Comdat; +class DIArgList; +class Function; +class Instruction; +class LocalAsMetadata; +class MDNode; +class Metadata; +class Module; +class NamedMDNode; +class raw_ostream; +class Type; +class Value; +class ValueSymbolTable; + +namespace dxil { + +class ValueEnumerator { +public: + using TypeList = std::vector; + + // For each value, we remember its Value* and occurrence frequency. + using ValueList = std::vector>; + + /// Attribute groups as encoded in bitcode are almost AttributeSets, but they + /// include the AttributeList index, so we have to track that in our map. + using IndexAndAttrSet = std::pair; + + UseListOrderStack UseListOrders; + +private: + using TypeMapType = DenseMap; + TypeMapType TypeMap; + TypeList Types; + + using ValueMapType = DenseMap; + ValueMapType ValueMap; + ValueList Values; + + using ComdatSetType = UniqueVector; + ComdatSetType Comdats; + + std::vector MDs; + std::vector FunctionMDs; + + /// Index of information about a piece of metadata. + struct MDIndex { + unsigned F = 0; ///< The ID of the function for this metadata, if any. + unsigned ID = 0; ///< The implicit ID of this metadata in bitcode. + + MDIndex() = default; + explicit MDIndex(unsigned F) : F(F) {} + + /// Check if this has a function tag, and it's different from NewF. + bool hasDifferentFunction(unsigned NewF) const { return F && F != NewF; } + + /// Fetch the MD this references out of the given metadata array. + const Metadata *get(ArrayRef MDs) const { + assert(ID && "Expected non-zero ID"); + assert(ID <= MDs.size() && "Expected valid ID"); + return MDs[ID - 1]; + } + }; + + using MetadataMapType = DenseMap; + MetadataMapType MetadataMap; + + /// Range of metadata IDs, as a half-open range. + struct MDRange { + unsigned First = 0; + unsigned Last = 0; + + /// Number of strings in the prefix of the metadata range. + unsigned NumStrings = 0; + + MDRange() = default; + explicit MDRange(unsigned First) : First(First) {} + }; + SmallDenseMap FunctionMDInfo; + + bool ShouldPreserveUseListOrder; + + using AttributeGroupMapType = DenseMap; + AttributeGroupMapType AttributeGroupMap; + std::vector AttributeGroups; + + using AttributeListMapType = DenseMap; + AttributeListMapType AttributeListMap; + std::vector AttributeLists; + + /// GlobalBasicBlockIDs - This map memoizes the basic block ID's referenced by + /// the "getGlobalBasicBlockID" method. + mutable DenseMap GlobalBasicBlockIDs; + + using InstructionMapType = DenseMap; + InstructionMapType InstructionMap; + unsigned InstructionCount; + + /// BasicBlocks - This contains all the basic blocks for the currently + /// incorporated function. Their reverse mapping is stored in ValueMap. + std::vector BasicBlocks; + + /// When a function is incorporated, this is the size of the Values list + /// before incorporation. 
+ unsigned NumModuleValues; + + /// When a function is incorporated, this is the size of the Metadatas list + /// before incorporation. + unsigned NumModuleMDs = 0; + unsigned NumMDStrings = 0; + + unsigned FirstFuncConstantID; + unsigned FirstInstID; + +public: + ValueEnumerator(const Module &M, bool ShouldPreserveUseListOrder); + ValueEnumerator(const ValueEnumerator &) = delete; + ValueEnumerator &operator=(const ValueEnumerator &) = delete; + + void dump() const; + void print(raw_ostream &OS, const ValueMapType &Map, const char *Name) const; + void print(raw_ostream &OS, const MetadataMapType &Map, + const char *Name) const; + + unsigned getValueID(const Value *V) const; + + unsigned getMetadataID(const Metadata *MD) const { + auto ID = getMetadataOrNullID(MD); + assert(ID != 0 && "Metadata not in slotcalculator!"); + return ID - 1; + } + + unsigned getMetadataOrNullID(const Metadata *MD) const { + return MetadataMap.lookup(MD).ID; + } + + unsigned numMDs() const { return MDs.size(); } + + bool shouldPreserveUseListOrder() const { return ShouldPreserveUseListOrder; } + + unsigned getTypeID(Type *T) const { + TypeMapType::const_iterator I = TypeMap.find(T); + assert(I != TypeMap.end() && "Type not in ValueEnumerator!"); + return I->second - 1; + } + + unsigned getInstructionID(const Instruction *I) const; + void setInstructionID(const Instruction *I); + + unsigned getAttributeListID(AttributeList PAL) const { + if (PAL.isEmpty()) + return 0; // Null maps to zero. + AttributeListMapType::const_iterator I = AttributeListMap.find(PAL); + assert(I != AttributeListMap.end() && "Attribute not in ValueEnumerator!"); + return I->second; + } + + unsigned getAttributeGroupID(IndexAndAttrSet Group) const { + if (!Group.second.hasAttributes()) + return 0; // Null maps to zero. + AttributeGroupMapType::const_iterator I = AttributeGroupMap.find(Group); + assert(I != AttributeGroupMap.end() && "Attribute not in ValueEnumerator!"); + return I->second; + } + + /// getFunctionConstantRange - Return the range of values that corresponds to + /// function-local constants. + void getFunctionConstantRange(unsigned &Start, unsigned &End) const { + Start = FirstFuncConstantID; + End = FirstInstID; + } + + const ValueList &getValues() const { return Values; } + + /// Check whether the current block has any metadata to emit. + bool hasMDs() const { return NumModuleMDs < MDs.size(); } + + /// Get the MDString metadata for this block. + ArrayRef getMDStrings() const { + return makeArrayRef(MDs).slice(NumModuleMDs, NumMDStrings); + } + + /// Get the non-MDString metadata for this block. + ArrayRef getNonMDStrings() const { + return makeArrayRef(MDs).slice(NumModuleMDs).slice(NumMDStrings); + } + + const TypeList &getTypes() const { return Types; } + + const std::vector &getBasicBlocks() const { + return BasicBlocks; + } + + const std::vector &getAttributeLists() const { + return AttributeLists; + } + + const std::vector &getAttributeGroups() const { + return AttributeGroups; + } + + const ComdatSetType &getComdats() const { return Comdats; } + unsigned getComdatID(const Comdat *C) const; + + /// getGlobalBasicBlockID - This returns the function-specific ID for the + /// specified basic block. This is relatively expensive information, so it + /// should only be used by rare constructs such as address-of-label. 
+ unsigned getGlobalBasicBlockID(const BasicBlock *BB) const; + + /// incorporateFunction/purgeFunction - If you'd like to deal with a function, + /// use these two methods to get its data into the ValueEnumerator! + void incorporateFunction(const Function &F); + + void purgeFunction(); + uint64_t computeBitsRequiredForTypeIndicies() const; + +private: + void OptimizeConstants(unsigned CstStart, unsigned CstEnd); + + /// Reorder the reachable metadata. + /// + /// This is not just an optimization, but is mandatory for emitting MDString + /// correctly. + void organizeMetadata(); + + /// Drop the function tag from the transitive operands of the given node. + void dropFunctionFromMetadata(MetadataMapType::value_type &FirstMD); + + /// Incorporate the function metadata. + /// + /// This should be called before enumerating LocalAsMetadata for the + /// function. + void incorporateFunctionMetadata(const Function &F); + + /// Enumerate a single instance of metadata with the given function tag. + /// + /// If \c MD has already been enumerated, check that \c F matches its + /// function tag. If not, call \a dropFunctionFromMetadata(). + /// + /// Otherwise, mark \c MD as visited. Assign it an ID, or just return it if + /// it's an \a MDNode. + const MDNode *enumerateMetadataImpl(unsigned F, const Metadata *MD); + + unsigned getMetadataFunctionID(const Function *F) const; + + /// Enumerate reachable metadata in (almost) post-order. + /// + /// Enumerate all the metadata reachable from MD. We want to minimize the + /// cost of reading bitcode records, and so the primary consideration is that + /// operands of uniqued nodes are resolved before the nodes are read. This + /// avoids re-uniquing them on the context and factors away RAUW support. + /// + /// This algorithm guarantees that subgraphs of uniqued nodes are in + /// post-order. Distinct subgraphs reachable only from a single uniqued node + /// will be in post-order. + /// + /// \note The relative order of a distinct and uniqued node is irrelevant. + /// \a organizeMetadata() will later partition distinct nodes ahead of + /// uniqued ones. + ///{ + void EnumerateMetadata(const Function *F, const Metadata *MD); + void EnumerateMetadata(unsigned F, const Metadata *MD); + ///} + + void EnumerateFunctionLocalMetadata(const Function &F, + const LocalAsMetadata *Local); + void EnumerateFunctionLocalMetadata(unsigned F, const LocalAsMetadata *Local); + void EnumerateFunctionLocalListMetadata(const Function &F, + const DIArgList *ArgList); + void EnumerateFunctionLocalListMetadata(unsigned F, const DIArgList *Arglist); + void EnumerateNamedMDNode(const NamedMDNode *NMD); + void EnumerateValue(const Value *V); + void EnumerateType(Type *T); + void EnumerateOperandType(const Value *V); + void EnumerateAttributes(AttributeList PAL); + + void EnumerateValueSymbolTable(const ValueSymbolTable &ST); + void EnumerateNamedMetadata(const Module &M); +}; + +} // end namespace dxil +} // end namespace llvm + +#endif // LLVM_DXILWRITER_VALUEENUMERATOR_H diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp @@ -0,0 +1,1188 @@ +//===- ValueEnumerator.cpp - Number values and types for bitcode writer ---===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the ValueEnumerator class. +// Forked from lib/Bitcode/Writer +// +//===----------------------------------------------------------------------===// + +#include "DXILValueEnumerator.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/Argument.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalIFunc.h" +#include "llvm/IR/GlobalObject.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Use.h" +#include "llvm/IR/User.h" +#include "llvm/IR/Value.h" +#include "llvm/IR/ValueSymbolTable.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include +#include +#include +#include + +using namespace llvm; +using namespace llvm::dxil; + +namespace { + +struct OrderMap { + DenseMap> IDs; + unsigned LastGlobalConstantID = 0; + unsigned LastGlobalValueID = 0; + + OrderMap() = default; + + bool isGlobalConstant(unsigned ID) const { + return ID <= LastGlobalConstantID; + } + + bool isGlobalValue(unsigned ID) const { + return ID <= LastGlobalValueID && !isGlobalConstant(ID); + } + + unsigned size() const { return IDs.size(); } + std::pair &operator[](const Value *V) { return IDs[V]; } + + std::pair lookup(const Value *V) const { + return IDs.lookup(V); + } + + void index(const Value *V) { + // Explicitly sequence get-size and insert-value operations to avoid UB. + unsigned ID = IDs.size() + 1; + IDs[V].first = ID; + } +}; + +} // end anonymous namespace + +static void orderValue(const Value *V, OrderMap &OM) { + if (OM.lookup(V).first) + return; + + if (const Constant *C = dyn_cast(V)) { + if (C->getNumOperands() && !isa(C)) { + for (const Value *Op : C->operands()) + if (!isa(Op) && !isa(Op)) + orderValue(Op, OM); + if (auto *CE = dyn_cast(C)) + if (CE->getOpcode() == Instruction::ShuffleVector) + orderValue(CE->getShuffleMaskForBitcode(), OM); + } + } + + // Note: we cannot cache this lookup above, since inserting into the map + // changes the map's size, and thus affects the other IDs. + OM.index(V); +} + +static OrderMap orderModule(const Module &M) { + // This needs to match the order used by ValueEnumerator::ValueEnumerator() + // and ValueEnumerator::incorporateFunction(). + OrderMap OM; + + // In the reader, initializers of GlobalValues are set *after* all the + // globals have been read. Rather than awkwardly modeling this behaviour + // directly in predictValueUseListOrderImpl(), just assign IDs to + // initializers of GlobalValues before GlobalValues themselves to model this + // implicitly. 
+ for (const GlobalVariable &G : M.globals()) + if (G.hasInitializer()) + if (!isa(G.getInitializer())) + orderValue(G.getInitializer(), OM); + for (const GlobalAlias &A : M.aliases()) + if (!isa(A.getAliasee())) + orderValue(A.getAliasee(), OM); + for (const GlobalIFunc &I : M.ifuncs()) + if (!isa(I.getResolver())) + orderValue(I.getResolver(), OM); + for (const Function &F : M) { + for (const Use &U : F.operands()) + if (!isa(U.get())) + orderValue(U.get(), OM); + } + + // As constants used in metadata operands are emitted as module-level + // constants, we must order them before other operands. Also, we must order + // these before global values, as these will be read before setting the + // global values' initializers. The latter matters for constants which have + // uses towards other constants that are used as initializers. + auto orderConstantValue = [&OM](const Value *V) { + if ((isa(V) && !isa(V)) || isa(V)) + orderValue(V, OM); + }; + for (const Function &F : M) { + if (F.isDeclaration()) + continue; + for (const BasicBlock &BB : F) + for (const Instruction &I : BB) + for (const Value *V : I.operands()) { + if (const auto *MAV = dyn_cast(V)) { + if (const auto *VAM = + dyn_cast(MAV->getMetadata())) { + orderConstantValue(VAM->getValue()); + } else if (const auto *AL = + dyn_cast(MAV->getMetadata())) { + for (const auto *VAM : AL->getArgs()) + orderConstantValue(VAM->getValue()); + } + } + } + } + OM.LastGlobalConstantID = OM.size(); + + // Initializers of GlobalValues are processed in + // BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather + // than ValueEnumerator, and match the code in predictValueUseListOrderImpl() + // by giving IDs in reverse order. + // + // Since GlobalValues never reference each other directly (just through + // initializers), their relative IDs only matter for determining order of + // uses in their initializers. + for (const Function &F : M) + orderValue(&F, OM); + for (const GlobalAlias &A : M.aliases()) + orderValue(&A, OM); + for (const GlobalIFunc &I : M.ifuncs()) + orderValue(&I, OM); + for (const GlobalVariable &G : M.globals()) + orderValue(&G, OM); + OM.LastGlobalValueID = OM.size(); + + for (const Function &F : M) { + if (F.isDeclaration()) + continue; + // Here we need to match the union of ValueEnumerator::incorporateFunction() + // and WriteFunction(). Basic blocks are implicitly declared before + // anything else (by declaring their size). + for (const BasicBlock &BB : F) + orderValue(&BB, OM); + for (const Argument &A : F.args()) + orderValue(&A, OM); + for (const BasicBlock &BB : F) + for (const Instruction &I : BB) { + for (const Value *Op : I.operands()) + if ((isa(*Op) && !isa(*Op)) || + isa(*Op)) + orderValue(Op, OM); + if (auto *SVI = dyn_cast(&I)) + orderValue(SVI->getShuffleMaskForBitcode(), OM); + } + for (const BasicBlock &BB : F) + for (const Instruction &I : BB) + orderValue(&I, OM); + } + return OM; +} + +static void predictValueUseListOrderImpl(const Value *V, const Function *F, + unsigned ID, const OrderMap &OM, + UseListOrderStack &Stack) { + // Predict use-list order for this one. + using Entry = std::pair; + SmallVector List; + for (const Use &U : V->uses()) + // Check if this user will be serialized. + if (OM.lookup(U.getUser()).first) + List.push_back(std::make_pair(&U, List.size())); + + if (List.size() < 2) + // We may have lost some users. 
+ return; + + bool IsGlobalValue = OM.isGlobalValue(ID); + llvm::sort(List, [&](const Entry &L, const Entry &R) { + const Use *LU = L.first; + const Use *RU = R.first; + if (LU == RU) + return false; + + auto LID = OM.lookup(LU->getUser()).first; + auto RID = OM.lookup(RU->getUser()).first; + + // Global values are processed in reverse order. + // + // Moreover, initializers of GlobalValues are set *after* all the globals + // have been read (despite having earlier IDs). Rather than awkwardly + // modeling this behaviour here, orderModule() has assigned IDs to + // initializers of GlobalValues before GlobalValues themselves. + if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID)) { + if (LID == RID) + return LU->getOperandNo() > RU->getOperandNo(); + return LID < RID; + } + + // If ID is 4, then expect: 7 6 5 1 2 3. + if (LID < RID) { + if (RID <= ID) + if (!IsGlobalValue) // GlobalValue uses don't get reversed. + return true; + return false; + } + if (RID < LID) { + if (LID <= ID) + if (!IsGlobalValue) // GlobalValue uses don't get reversed. + return false; + return true; + } + + // LID and RID are equal, so we have different operands of the same user. + // Assume operands are added in order for all instructions. + if (LID <= ID) + if (!IsGlobalValue) // GlobalValue uses don't get reversed. + return LU->getOperandNo() < RU->getOperandNo(); + return LU->getOperandNo() > RU->getOperandNo(); + }); + + if (llvm::is_sorted(List, [](const Entry &L, const Entry &R) { + return L.second < R.second; + })) + // Order is already correct. + return; + + // Store the shuffle. + Stack.emplace_back(V, F, List.size()); + assert(List.size() == Stack.back().Shuffle.size() && "Wrong size"); + for (size_t I = 0, E = List.size(); I != E; ++I) + Stack.back().Shuffle[I] = List[I].second; +} + +static void predictValueUseListOrder(const Value *V, const Function *F, + OrderMap &OM, UseListOrderStack &Stack) { + auto &IDPair = OM[V]; + assert(IDPair.first && "Unmapped value"); + if (IDPair.second) + // Already predicted. + return; + + // Do the actual prediction. + IDPair.second = true; + if (!V->use_empty() && std::next(V->use_begin()) != V->use_end()) + predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack); + + // Recursive descent into constants. + if (const Constant *C = dyn_cast(V)) { + if (C->getNumOperands()) { // Visit GlobalValues. + for (const Value *Op : C->operands()) + if (isa(Op)) // Visit GlobalValues. + predictValueUseListOrder(Op, F, OM, Stack); + if (auto *CE = dyn_cast(C)) + if (CE->getOpcode() == Instruction::ShuffleVector) + predictValueUseListOrder(CE->getShuffleMaskForBitcode(), F, OM, + Stack); + } + } +} + +static UseListOrderStack predictUseListOrder(const Module &M) { + OrderMap OM = orderModule(M); + + // Use-list orders need to be serialized after all the users have been added + // to a value, or else the shuffles will be incomplete. Store them per + // function in a stack. + // + // Aside from function order, the order of values doesn't matter much here. + UseListOrderStack Stack; + + // We want to visit the functions backward now so we can list function-local + // constants in the last Function they're used in. Module-level constants + // have already been visited above. 
+ for (const Function &F : llvm::reverse(M)) { + if (F.isDeclaration()) + continue; + for (const BasicBlock &BB : F) + predictValueUseListOrder(&BB, &F, OM, Stack); + for (const Argument &A : F.args()) + predictValueUseListOrder(&A, &F, OM, Stack); + for (const BasicBlock &BB : F) + for (const Instruction &I : BB) { + for (const Value *Op : I.operands()) + if (isa(*Op) || isa(*Op)) // Visit GlobalValues. + predictValueUseListOrder(Op, &F, OM, Stack); + if (auto *SVI = dyn_cast(&I)) + predictValueUseListOrder(SVI->getShuffleMaskForBitcode(), &F, OM, + Stack); + } + for (const BasicBlock &BB : F) + for (const Instruction &I : BB) + predictValueUseListOrder(&I, &F, OM, Stack); + } + + // Visit globals last, since the module-level use-list block will be seen + // before the function bodies are processed. + for (const GlobalVariable &G : M.globals()) + predictValueUseListOrder(&G, nullptr, OM, Stack); + for (const Function &F : M) + predictValueUseListOrder(&F, nullptr, OM, Stack); + for (const GlobalAlias &A : M.aliases()) + predictValueUseListOrder(&A, nullptr, OM, Stack); + for (const GlobalIFunc &I : M.ifuncs()) + predictValueUseListOrder(&I, nullptr, OM, Stack); + for (const GlobalVariable &G : M.globals()) + if (G.hasInitializer()) + predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack); + for (const GlobalAlias &A : M.aliases()) + predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack); + for (const GlobalIFunc &I : M.ifuncs()) + predictValueUseListOrder(I.getResolver(), nullptr, OM, Stack); + for (const Function &F : M) { + for (const Use &U : F.operands()) + predictValueUseListOrder(U.get(), nullptr, OM, Stack); + } + + return Stack; +} + +static bool isIntOrIntVectorValue(const std::pair &V) { + return V.first->getType()->isIntOrIntVectorTy(); +} + +ValueEnumerator::ValueEnumerator(const Module &M, + bool ShouldPreserveUseListOrder) + : ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) { + if (ShouldPreserveUseListOrder) + UseListOrders = predictUseListOrder(M); + + // Enumerate the global variables. + for (const GlobalVariable &GV : M.globals()) { + EnumerateValue(&GV); + EnumerateType(GV.getValueType()); + } + + // Enumerate the functions. + for (const Function &F : M) { + EnumerateValue(&F); + EnumerateType(F.getValueType()); + EnumerateAttributes(F.getAttributes()); + } + + // Enumerate the aliases. + for (const GlobalAlias &GA : M.aliases()) { + EnumerateValue(&GA); + EnumerateType(GA.getValueType()); + } + + // Enumerate the ifuncs. + for (const GlobalIFunc &GIF : M.ifuncs()) { + EnumerateValue(&GIF); + EnumerateType(GIF.getValueType()); + } + + // Remember what is the cutoff between globalvalue's and other constants. + unsigned FirstConstant = Values.size(); + + // Enumerate the global variable initializers and attributes. + for (const GlobalVariable &GV : M.globals()) { + if (GV.hasInitializer()) + EnumerateValue(GV.getInitializer()); + if (GV.hasAttributes()) + EnumerateAttributes(GV.getAttributesAsList(AttributeList::FunctionIndex)); + } + + // Enumerate the aliasees. + for (const GlobalAlias &GA : M.aliases()) + EnumerateValue(GA.getAliasee()); + + // Enumerate the ifunc resolvers. + for (const GlobalIFunc &GIF : M.ifuncs()) + EnumerateValue(GIF.getResolver()); + + // Enumerate any optional Function data. + for (const Function &F : M) + for (const Use &U : F.operands()) + EnumerateValue(U.get()); + + // Enumerate the metadata type. 
+ // + // TODO: Move this to ValueEnumerator::EnumerateOperandType() once bitcode + // only encodes the metadata type when it's used as a value. + EnumerateType(Type::getMetadataTy(M.getContext())); + + // Insert constants and metadata that are named at module level into the slot + // pool so that the module symbol table can refer to them... + EnumerateValueSymbolTable(M.getValueSymbolTable()); + EnumerateNamedMetadata(M); + + SmallVector, 8> MDs; + for (const GlobalVariable &GV : M.globals()) { + MDs.clear(); + GV.getAllMetadata(MDs); + for (const auto &I : MDs) + // FIXME: Pass GV to EnumerateMetadata and arrange for the bitcode writer + // to write metadata to the global variable's own metadata block + // (PR28134). + EnumerateMetadata(nullptr, I.second); + } + + // Enumerate types used by function bodies and argument lists. + for (const Function &F : M) { + for (const Argument &A : F.args()) + EnumerateType(A.getType()); + + // Enumerate metadata attached to this function. + MDs.clear(); + F.getAllMetadata(MDs); + for (const auto &I : MDs) + EnumerateMetadata(F.isDeclaration() ? nullptr : &F, I.second); + + for (const BasicBlock &BB : F) + for (const Instruction &I : BB) { + for (const Use &Op : I.operands()) { + auto *MD = dyn_cast(&Op); + if (!MD) { + EnumerateOperandType(Op); + continue; + } + + // Local metadata is enumerated during function-incorporation, but + // any ConstantAsMetadata arguments in a DIArgList should be examined + // now. + if (isa(MD->getMetadata())) + continue; + if (auto *AL = dyn_cast(MD->getMetadata())) { + for (auto *VAM : AL->getArgs()) + if (isa(VAM)) + EnumerateMetadata(&F, VAM); + continue; + } + + EnumerateMetadata(&F, MD->getMetadata()); + } + if (auto *SVI = dyn_cast(&I)) + EnumerateType(SVI->getShuffleMaskForBitcode()->getType()); + if (auto *GEP = dyn_cast(&I)) + EnumerateType(GEP->getSourceElementType()); + if (auto *AI = dyn_cast(&I)) + EnumerateType(AI->getAllocatedType()); + EnumerateType(I.getType()); + if (const auto *Call = dyn_cast(&I)) { + EnumerateAttributes(Call->getAttributes()); + EnumerateType(Call->getFunctionType()); + } + + // Enumerate metadata attached with this instruction. + MDs.clear(); + I.getAllMetadataOtherThanDebugLoc(MDs); + for (unsigned i = 0, e = MDs.size(); i != e; ++i) + EnumerateMetadata(&F, MDs[i].second); + + // Don't enumerate the location directly -- it has a special record + // type -- but enumerate its operands. + if (DILocation *L = I.getDebugLoc()) + for (const Metadata *Op : L->operands()) + EnumerateMetadata(&F, Op); + } + } + + // Optimize constant ordering. + OptimizeConstants(FirstConstant, Values.size()); + + // Organize metadata ordering. 
+ organizeMetadata(); +} + +unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const { + InstructionMapType::const_iterator I = InstructionMap.find(Inst); + assert(I != InstructionMap.end() && "Instruction is not mapped!"); + return I->second; +} + +unsigned ValueEnumerator::getComdatID(const Comdat *C) const { + unsigned ComdatID = Comdats.idFor(C); + assert(ComdatID && "Comdat not found!"); + return ComdatID; +} + +void ValueEnumerator::setInstructionID(const Instruction *I) { + InstructionMap[I] = InstructionCount++; +} + +unsigned ValueEnumerator::getValueID(const Value *V) const { + if (auto *MD = dyn_cast(V)) + return getMetadataID(MD->getMetadata()); + + ValueMapType::const_iterator I = ValueMap.find(V); + assert(I != ValueMap.end() && "Value not in slotcalculator!"); + return I->second - 1; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void ValueEnumerator::dump() const { + print(dbgs(), ValueMap, "Default"); + dbgs() << '\n'; + print(dbgs(), MetadataMap, "MetaData"); + dbgs() << '\n'; +} +#endif + +void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map, + const char *Name) const { + OS << "Map Name: " << Name << "\n"; + OS << "Size: " << Map.size() << "\n"; + for (const auto &I : Map) { + const Value *V = I.first; + if (V->hasName()) + OS << "Value: " << V->getName(); + else + OS << "Value: [null]\n"; + V->print(errs()); + errs() << '\n'; + + OS << " Uses(" << V->getNumUses() << "):"; + for (const Use &U : V->uses()) { + if (&U != &*V->use_begin()) + OS << ","; + if (U->hasName()) + OS << " " << U->getName(); + else + OS << " [null]"; + } + OS << "\n\n"; + } +} + +void ValueEnumerator::print(raw_ostream &OS, const MetadataMapType &Map, + const char *Name) const { + OS << "Map Name: " << Name << "\n"; + OS << "Size: " << Map.size() << "\n"; + for (const auto &I : Map) { + const Metadata *MD = I.first; + OS << "Metadata: slot = " << I.second.ID << "\n"; + OS << "Metadata: function = " << I.second.F << "\n"; + MD->print(OS); + OS << "\n"; + } +} + +/// OptimizeConstants - Reorder constant pool for denser encoding. +void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) { + if (CstStart == CstEnd || CstStart + 1 == CstEnd) + return; + + if (ShouldPreserveUseListOrder) + // Optimizing constants makes the use-list order difficult to predict. + // Disable it for now when trying to preserve the order. + return; + + std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd, + [this](const std::pair &LHS, + const std::pair &RHS) { + // Sort by plane. + if (LHS.first->getType() != RHS.first->getType()) + return getTypeID(LHS.first->getType()) < + getTypeID(RHS.first->getType()); + // Then by frequency. + return LHS.second > RHS.second; + }); + + // Ensure that integer and vector of integer constants are at the start of the + // constant pool. This is important so that GEP structure indices come before + // gep constant exprs. + std::stable_partition(Values.begin() + CstStart, Values.begin() + CstEnd, + isIntOrIntVectorValue); + + // Rebuild the modified portion of ValueMap. + for (; CstStart != CstEnd; ++CstStart) + ValueMap[Values[CstStart].first] = CstStart + 1; +} + +/// EnumerateValueSymbolTable - Insert all of the values in the specified symbol +/// table into the values table. 
+void ValueEnumerator::EnumerateValueSymbolTable(const ValueSymbolTable &VST) {
+  for (ValueSymbolTable::const_iterator VI = VST.begin(), VE = VST.end();
+       VI != VE; ++VI)
+    EnumerateValue(VI->getValue());
+}
+
+/// Insert all of the values referenced by named metadata in the specified
+/// module.
+void ValueEnumerator::EnumerateNamedMetadata(const Module &M) {
+  for (const auto &I : M.named_metadata())
+    EnumerateNamedMDNode(&I);
+}
+
+void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) {
+  for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i)
+    EnumerateMetadata(nullptr, MD->getOperand(i));
+}
+
+unsigned ValueEnumerator::getMetadataFunctionID(const Function *F) const {
+  return F ? getValueID(F) + 1 : 0;
+}
+
+void ValueEnumerator::EnumerateMetadata(const Function *F, const Metadata *MD) {
+  EnumerateMetadata(getMetadataFunctionID(F), MD);
+}
+
+void ValueEnumerator::EnumerateFunctionLocalMetadata(
+    const Function &F, const LocalAsMetadata *Local) {
+  EnumerateFunctionLocalMetadata(getMetadataFunctionID(&F), Local);
+}
+
+void ValueEnumerator::EnumerateFunctionLocalListMetadata(
+    const Function &F, const DIArgList *ArgList) {
+  EnumerateFunctionLocalListMetadata(getMetadataFunctionID(&F), ArgList);
+}
+
+void ValueEnumerator::dropFunctionFromMetadata(
+    MetadataMapType::value_type &FirstMD) {
+  SmallVector<const MDNode *, 64> Worklist;
+  auto push = [&Worklist](MetadataMapType::value_type &MD) {
+    auto &Entry = MD.second;
+
+    // Nothing to do if this metadata isn't tagged.
+    if (!Entry.F)
+      return;
+
+    // Drop the function tag.
+    Entry.F = 0;
+
+    // If this has an ID and is an MDNode, then its operands have entries as
+    // well. We need to drop the function from them too.
+    if (Entry.ID)
+      if (auto *N = dyn_cast<MDNode>(MD.first))
+        Worklist.push_back(N);
+  };
+  push(FirstMD);
+  while (!Worklist.empty())
+    for (const Metadata *Op : Worklist.pop_back_val()->operands()) {
+      if (!Op)
+        continue;
+      auto MD = MetadataMap.find(Op);
+      if (MD != MetadataMap.end())
+        push(*MD);
+    }
+}
+
+void ValueEnumerator::EnumerateMetadata(unsigned F, const Metadata *MD) {
+  // It's vital for reader efficiency that uniqued subgraphs are done in
+  // post-order; it's expensive when their operands have forward references.
+  // If a distinct node is referenced from a uniqued node, it'll be delayed
+  // until the uniqued subgraph has been completely traversed.
+  SmallVector<const MDNode *, 32> DelayedDistinctNodes;
+
+  // Start by enumerating MD, and then work through its transitive operands in
+  // post-order. This requires a depth-first search.
+  SmallVector<std::pair<const MDNode *, MDNode::op_iterator>, 32> Worklist;
+  if (const MDNode *N = enumerateMetadataImpl(F, MD))
+    Worklist.push_back(std::make_pair(N, N->op_begin()));
+
+  while (!Worklist.empty()) {
+    const MDNode *N = Worklist.back().first;
+
+    // Enumerate operands until we hit a new node. We need to traverse these
+    // nodes' operands before visiting the rest of N's operands.
+    MDNode::op_iterator I = std::find_if(
+        Worklist.back().second, N->op_end(),
+        [&](const Metadata *MD) { return enumerateMetadataImpl(F, MD); });
+    if (I != N->op_end()) {
+      auto *Op = cast<MDNode>(*I);
+      Worklist.back().second = ++I;
+
+      // Delay traversing Op if it's a distinct node and N is uniqued.
+      if (Op->isDistinct() && !N->isDistinct())
+        DelayedDistinctNodes.push_back(Op);
+      else
+        Worklist.push_back(std::make_pair(Op, Op->op_begin()));
+      continue;
+    }
+
+    // All the operands have been visited. Now assign an ID.
+    Worklist.pop_back();
+    MDs.push_back(N);
+    MetadataMap[N].ID = MDs.size();
+
+    // Flush out any delayed distinct nodes; these are all the distinct nodes
+    // that are leaves in the last uniqued subgraph.
+    if (Worklist.empty() || Worklist.back().first->isDistinct()) {
+      for (const MDNode *N : DelayedDistinctNodes)
+        Worklist.push_back(std::make_pair(N, N->op_begin()));
+      DelayedDistinctNodes.clear();
+    }
+  }
+}
+
+const MDNode *ValueEnumerator::enumerateMetadataImpl(unsigned F,
+                                                     const Metadata *MD) {
+  if (!MD)
+    return nullptr;
+
+  assert(
+      (isa<MDNode>(MD) || isa<MDString>(MD) || isa<ConstantAsMetadata>(MD)) &&
+      "Invalid metadata kind");
+
+  auto Insertion = MetadataMap.insert(std::make_pair(MD, MDIndex(F)));
+  MDIndex &Entry = Insertion.first->second;
+  if (!Insertion.second) {
+    // Already mapped. If F doesn't match the function tag, drop it.
+    if (Entry.hasDifferentFunction(F))
+      dropFunctionFromMetadata(*Insertion.first);
+    return nullptr;
+  }
+
+  // Don't assign IDs to metadata nodes.
+  if (auto *N = dyn_cast<MDNode>(MD))
+    return N;
+
+  // Save the metadata.
+  MDs.push_back(MD);
+  Entry.ID = MDs.size();
+
+  // Enumerate the constant, if any.
+  if (auto *C = dyn_cast<ConstantAsMetadata>(MD))
+    EnumerateValue(C->getValue());
+
+  return nullptr;
+}
+
+/// EnumerateFunctionLocalMetadata - Incorporate function-local metadata
+/// information reachable from the metadata.
+void ValueEnumerator::EnumerateFunctionLocalMetadata(
+    unsigned F, const LocalAsMetadata *Local) {
+  assert(F && "Expected a function");
+
+  // Check to see if it's already in!
+  MDIndex &Index = MetadataMap[Local];
+  if (Index.ID) {
+    assert(Index.F == F && "Expected the same function");
+    return;
+  }
+
+  MDs.push_back(Local);
+  Index.F = F;
+  Index.ID = MDs.size();
+
+  EnumerateValue(Local->getValue());
+}
+
+/// EnumerateFunctionLocalListMetadata - Incorporate function-local metadata
+/// information reachable from the metadata.
+void ValueEnumerator::EnumerateFunctionLocalListMetadata(
+    unsigned F, const DIArgList *ArgList) {
+  assert(F && "Expected a function");
+
+  // Check to see if it's already in!
+  MDIndex &Index = MetadataMap[ArgList];
+  if (Index.ID) {
+    assert(Index.F == F && "Expected the same function");
+    return;
+  }
+
+  for (ValueAsMetadata *VAM : ArgList->getArgs()) {
+    if (isa<LocalAsMetadata>(VAM)) {
+      assert(MetadataMap.count(VAM) &&
+             "LocalAsMetadata should be enumerated before DIArgList");
+      assert(MetadataMap[VAM].F == F &&
+             "Expected LocalAsMetadata in the same function");
+    } else {
+      assert(isa<ConstantAsMetadata>(VAM) &&
+             "Expected LocalAsMetadata or ConstantAsMetadata");
+      assert(ValueMap.count(VAM->getValue()) &&
+             "Constant should be enumerated before DIArgList");
+      EnumerateMetadata(F, VAM);
+    }
+  }
+
+  MDs.push_back(ArgList);
+  Index.F = F;
+  Index.ID = MDs.size();
+}
+
+static unsigned getMetadataTypeOrder(const Metadata *MD) {
+  // Strings are emitted in bulk and must come first.
+  if (isa<MDString>(MD))
+    return 0;
+
+  // ConstantAsMetadata doesn't reference anything. We may as well shuffle it
+  // to the front since we can detect it.
+  auto *N = dyn_cast<MDNode>(MD);
+  if (!N)
+    return 1;
+
+  // The reader is fast at forward references for distinct node operands, but
+  // slow when uniqued operands are unresolved.
+  return N->isDistinct() ? 2 : 3;
+}
+
+void ValueEnumerator::organizeMetadata() {
+  assert(MetadataMap.size() == MDs.size() &&
+         "Metadata map and vector out of sync");
+
+  if (MDs.empty())
+    return;
+
+  // Copy out the index information from MetadataMap in order to choose a new
+  // order.
+ SmallVector Order; + Order.reserve(MetadataMap.size()); + for (const Metadata *MD : MDs) + Order.push_back(MetadataMap.lookup(MD)); + + // Partition: + // - by function, then + // - by isa + // and then sort by the original/current ID. Since the IDs are guaranteed to + // be unique, the result of std::sort will be deterministic. There's no need + // for std::stable_sort. + llvm::sort(Order, [this](MDIndex LHS, MDIndex RHS) { + return std::make_tuple(LHS.F, getMetadataTypeOrder(LHS.get(MDs)), LHS.ID) < + std::make_tuple(RHS.F, getMetadataTypeOrder(RHS.get(MDs)), RHS.ID); + }); + + // Rebuild MDs, index the metadata ranges for each function in FunctionMDs, + // and fix up MetadataMap. + std::vector OldMDs; + MDs.swap(OldMDs); + MDs.reserve(OldMDs.size()); + for (unsigned I = 0, E = Order.size(); I != E && !Order[I].F; ++I) { + auto *MD = Order[I].get(OldMDs); + MDs.push_back(MD); + MetadataMap[MD].ID = I + 1; + if (isa(MD)) + ++NumMDStrings; + } + + // Return early if there's nothing for the functions. + if (MDs.size() == Order.size()) + return; + + // Build the function metadata ranges. + MDRange R; + FunctionMDs.reserve(OldMDs.size()); + unsigned PrevF = 0; + for (unsigned I = MDs.size(), E = Order.size(), ID = MDs.size(); I != E; + ++I) { + unsigned F = Order[I].F; + if (!PrevF) { + PrevF = F; + } else if (PrevF != F) { + R.Last = FunctionMDs.size(); + std::swap(R, FunctionMDInfo[PrevF]); + R.First = FunctionMDs.size(); + + ID = MDs.size(); + PrevF = F; + } + + auto *MD = Order[I].get(OldMDs); + FunctionMDs.push_back(MD); + MetadataMap[MD].ID = ++ID; + if (isa(MD)) + ++R.NumStrings; + } + R.Last = FunctionMDs.size(); + FunctionMDInfo[PrevF] = R; +} + +void ValueEnumerator::incorporateFunctionMetadata(const Function &F) { + NumModuleMDs = MDs.size(); + + auto R = FunctionMDInfo.lookup(getValueID(&F) + 1); + NumMDStrings = R.NumStrings; + MDs.insert(MDs.end(), FunctionMDs.begin() + R.First, + FunctionMDs.begin() + R.Last); +} + +void ValueEnumerator::EnumerateValue(const Value *V) { + assert(!V->getType()->isVoidTy() && "Can't insert void values!"); + assert(!isa(V) && "EnumerateValue doesn't handle Metadata!"); + + // Check to see if it's already in! + unsigned &ValueID = ValueMap[V]; + if (ValueID) { + // Increment use count. + Values[ValueID - 1].second++; + return; + } + + if (auto *GO = dyn_cast(V)) + if (const Comdat *C = GO->getComdat()) + Comdats.insert(C); + + // Enumerate the type of this value. + EnumerateType(V->getType()); + + if (const Constant *C = dyn_cast(V)) { + if (isa(C)) { + // Initializers for globals are handled explicitly elsewhere. + } else if (C->getNumOperands()) { + // If a constant has operands, enumerate them. This makes sure that if a + // constant has uses (for example an array of const ints), that they are + // inserted also. + + // We prefer to enumerate them with values before we enumerate the user + // itself. This makes it more likely that we can avoid forward references + // in the reader. We know that there can be no cycles in the constants + // graph that don't go through a global variable. + for (User::const_op_iterator I = C->op_begin(), E = C->op_end(); I != E; + ++I) + if (!isa(*I)) // Don't enumerate BB operand to BlockAddress. + EnumerateValue(*I); + if (auto *CE = dyn_cast(C)) { + if (CE->getOpcode() == Instruction::ShuffleVector) + EnumerateValue(CE->getShuffleMaskForBitcode()); + if (auto *GEP = dyn_cast(CE)) + EnumerateType(GEP->getSourceElementType()); + } + + // Finally, add the value. 
Doing this could make the ValueID reference be + // dangling, don't reuse it. + Values.push_back(std::make_pair(V, 1U)); + ValueMap[V] = Values.size(); + return; + } + } + + // Add the value. + Values.push_back(std::make_pair(V, 1U)); + ValueID = Values.size(); +} + +void ValueEnumerator::EnumerateType(Type *Ty) { + unsigned *TypeID = &TypeMap[Ty]; + + // We've already seen this type. + if (*TypeID) + return; + + // If it is a non-anonymous struct, mark the type as being visited so that we + // don't recursively visit it. This is safe because we allow forward + // references of these in the bitcode reader. + if (StructType *STy = dyn_cast(Ty)) + if (!STy->isLiteral()) + *TypeID = ~0U; + + // Enumerate all of the subtypes before we enumerate this type. This ensures + // that the type will be enumerated in an order that can be directly built. + for (Type *SubTy : Ty->subtypes()) + EnumerateType(SubTy); + + // Refresh the TypeID pointer in case the table rehashed. + TypeID = &TypeMap[Ty]; + + // Check to see if we got the pointer another way. This can happen when + // enumerating recursive types that hit the base case deeper than they start. + // + // If this is actually a struct that we are treating as forward ref'able, + // then emit the definition now that all of its contents are available. + if (*TypeID && *TypeID != ~0U) + return; + + // Add this type now that its contents are all happily enumerated. + Types.push_back(Ty); + + *TypeID = Types.size(); +} + +// Enumerate the types for the specified value. If the value is a constant, +// walk through it, enumerating the types of the constant. +void ValueEnumerator::EnumerateOperandType(const Value *V) { + EnumerateType(V->getType()); + + assert(!isa(V) && "Unexpected metadata operand"); + + const Constant *C = dyn_cast(V); + if (!C) + return; + + // If this constant is already enumerated, ignore it, we know its type must + // be enumerated. + if (ValueMap.count(C)) + return; + + // This constant may have operands, make sure to enumerate the types in + // them. + for (const Value *Op : C->operands()) { + // Don't enumerate basic blocks here, this happens as operands to + // blockaddress. + if (isa(Op)) + continue; + + EnumerateOperandType(Op); + } + if (auto *CE = dyn_cast(C)) { + if (CE->getOpcode() == Instruction::ShuffleVector) + EnumerateOperandType(CE->getShuffleMaskForBitcode()); + if (CE->getOpcode() == Instruction::GetElementPtr) + EnumerateType(cast(CE)->getSourceElementType()); + } +} + +void ValueEnumerator::EnumerateAttributes(AttributeList PAL) { + if (PAL.isEmpty()) + return; // null is always 0. + + // Do a lookup. + unsigned &Entry = AttributeListMap[PAL]; + if (Entry == 0) { + // Never saw this before, add it. + AttributeLists.push_back(PAL); + Entry = AttributeLists.size(); + } + + // Do lookups for all attribute groups. + for (unsigned i : PAL.indexes()) { + AttributeSet AS = PAL.getAttributes(i); + if (!AS.hasAttributes()) + continue; + IndexAndAttrSet Pair = {i, AS}; + unsigned &Entry = AttributeGroupMap[Pair]; + if (Entry == 0) { + AttributeGroups.push_back(Pair); + Entry = AttributeGroups.size(); + + for (Attribute Attr : AS) { + if (Attr.isTypeAttribute()) + EnumerateType(Attr.getValueAsType()); + } + } + } +} + +void ValueEnumerator::incorporateFunction(const Function &F) { + InstructionCount = 0; + NumModuleValues = Values.size(); + + // Add global metadata to the function block. This doesn't include + // LocalAsMetadata. + incorporateFunctionMetadata(F); + + // Adding function arguments to the value table. 
+  for (const auto &I : F.args()) {
+    EnumerateValue(&I);
+    if (I.hasAttribute(Attribute::ByVal))
+      EnumerateType(I.getParamByValType());
+    else if (I.hasAttribute(Attribute::StructRet))
+      EnumerateType(I.getParamStructRetType());
+    else if (I.hasAttribute(Attribute::ByRef))
+      EnumerateType(I.getParamByRefType());
+  }
+  FirstFuncConstantID = Values.size();
+
+  // Add all function-level constants to the value table.
+  for (const BasicBlock &BB : F) {
+    for (const Instruction &I : BB) {
+      for (const Use &OI : I.operands()) {
+        if ((isa<Constant>(OI) && !isa<GlobalValue>(OI)) || isa<InlineAsm>(OI))
+          EnumerateValue(OI);
+      }
+      if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+        EnumerateValue(SVI->getShuffleMaskForBitcode());
+    }
+    BasicBlocks.push_back(&BB);
+    ValueMap[&BB] = BasicBlocks.size();
+  }
+
+  // Optimize the constant layout.
+  OptimizeConstants(FirstFuncConstantID, Values.size());
+
+  // Add the function's parameter attributes so they are available for use in
+  // the function's instructions.
+  EnumerateAttributes(F.getAttributes());
+
+  FirstInstID = Values.size();
+
+  SmallVector<LocalAsMetadata *, 8> FnLocalMDVector;
+  SmallVector<DIArgList *> ArgListMDVector;
+  // Add all of the instructions.
+  for (const BasicBlock &BB : F) {
+    for (const Instruction &I : BB) {
+      for (const Use &OI : I.operands()) {
+        if (auto *MD = dyn_cast<MetadataAsValue>(&OI)) {
+          if (auto *Local = dyn_cast<LocalAsMetadata>(MD->getMetadata())) {
+            // Enumerate metadata after the instructions they might refer to.
+            FnLocalMDVector.push_back(Local);
+          } else if (auto *ArgList = dyn_cast<DIArgList>(MD->getMetadata())) {
+            ArgListMDVector.push_back(ArgList);
+            for (ValueAsMetadata *VMD : ArgList->getArgs()) {
+              if (auto *Local = dyn_cast<LocalAsMetadata>(VMD)) {
+                // Enumerate metadata after the instructions they might refer
+                // to.
+                FnLocalMDVector.push_back(Local);
+              }
+            }
+          }
+        }
+      }
+
+      if (!I.getType()->isVoidTy())
+        EnumerateValue(&I);
+    }
+  }
+
+  // Add all of the function-local metadata.
+  for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i) {
+    // At this point, every local value has been incorporated, so we should not
+    // have a metadata operand that references a value that hasn't been seen.
+    assert(ValueMap.count(FnLocalMDVector[i]->getValue()) &&
+           "Missing value for metadata operand");
+    EnumerateFunctionLocalMetadata(F, FnLocalMDVector[i]);
+  }
+  // DIArgList entries must come after function-local metadata, as it is not
+  // possible to forward-reference them.
+  for (const DIArgList *ArgList : ArgListMDVector)
+    EnumerateFunctionLocalListMetadata(F, ArgList);
+}
+
+void ValueEnumerator::purgeFunction() {
+  /// Remove purged values from the ValueMap.
+  for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i)
+    ValueMap.erase(Values[i].first);
+  for (unsigned i = NumModuleMDs, e = MDs.size(); i != e; ++i)
+    MetadataMap.erase(MDs[i]);
+  for (const BasicBlock *BB : BasicBlocks)
+    ValueMap.erase(BB);
+
+  Values.resize(NumModuleValues);
+  MDs.resize(NumModuleMDs);
+  BasicBlocks.clear();
+  NumMDStrings = 0;
+}
+
+static void IncorporateFunctionInfoGlobalBBIDs(
+    const Function *F, DenseMap<const BasicBlock *, unsigned> &IDMap) {
+  unsigned Counter = 0;
+  for (const BasicBlock &BB : *F)
+    IDMap[&BB] = ++Counter;
+}
+
+/// getGlobalBasicBlockID - This returns the function-specific ID for the
+/// specified basic block. This is relatively expensive information, so it
+/// should only be used by rare constructs such as address-of-label.
+unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const { + unsigned &Idx = GlobalBasicBlockIDs[BB]; + if (Idx != 0) + return Idx - 1; + + IncorporateFunctionInfoGlobalBBIDs(BB->getParent(), GlobalBasicBlockIDs); + return getGlobalBasicBlockID(BB); +} + +uint64_t ValueEnumerator::computeBitsRequiredForTypeIndicies() const { + return Log2_32_Ceil(getTypes().size() + 1); +} diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.h b/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.h @@ -0,0 +1,32 @@ +//===-- DXILWriterPass.h - Bitcode writing pass --------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// +/// This file provides a bitcode writing pass. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_BITCODE_DXILWriterPass_H +#define LLVM_BITCODE_DXILWriterPass_H + +#include "DirectX.h" +#include "llvm/Bitcode/BitcodeWriter.h" +#include "llvm/IR/PassManager.h" + +namespace llvm { +class Module; +class raw_ostream; + +/// Create and return a pass that writes the module to the specified +/// ostream. Note that this pass is designed for use with the legacy pass +/// manager. +ModulePass *createDXILWriterPass(raw_ostream &Str); + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILWriterPass.cpp @@ -0,0 +1,61 @@ +//===- DXILWriterPass.cpp - Bitcode writing pass --------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// DXILWriterPass implementation. 
+// +//===----------------------------------------------------------------------===// + +#include "DXILWriterPass.h" +#include "DXILBitcodeWriter.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/ModuleSummaryAnalysis.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/PassManager.h" +#include "llvm/InitializePasses.h" +#include "llvm/Pass.h" + +using namespace llvm; +using namespace llvm::dxil; + +namespace { +class WriteDXILPass : public llvm::ModulePass { + raw_ostream &OS; // raw_ostream to print on + +public: + static char ID; // Pass identification, replacement for typeid + WriteDXILPass() : ModulePass(ID), OS(dbgs()) { + initializeWriteDXILPassPass(*PassRegistry::getPassRegistry()); + } + + explicit WriteDXILPass(raw_ostream &o) : ModulePass(ID), OS(o) { + initializeWriteDXILPassPass(*PassRegistry::getPassRegistry()); + } + + StringRef getPassName() const override { return "Bitcode Writer"; } + + bool runOnModule(Module &M) override { + WriteDXILToFile(M, OS); + return false; + } + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesAll(); + } +}; +} // namespace + +char WriteDXILPass::ID = 0; +INITIALIZE_PASS_BEGIN(WriteDXILPass, "write-bitcode", "Write Bitcode", false, + true) +INITIALIZE_PASS_DEPENDENCY(ModuleSummaryIndexWrapperPass) +INITIALIZE_PASS_END(WriteDXILPass, "write-bitcode", "Write Bitcode", false, + true) + +ModulePass *llvm::createDXILWriterPass(raw_ostream &Str) { + return new WriteDXILPass(Str); +} diff --git a/llvm/lib/Target/DirectX/DirectX.h b/llvm/lib/Target/DirectX/DirectX.h --- a/llvm/lib/Target/DirectX/DirectX.h +++ b/llvm/lib/Target/DirectX/DirectX.h @@ -15,6 +15,9 @@ class ModulePass; class PassRegistry; +/// Initializer for dxil writer pass +void initializeWriteDXILPassPass(PassRegistry &); + /// Initializer for DXIL-prepare void initializeDXILPrepareModulePass(PassRegistry &); diff --git a/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp b/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp --- a/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp +++ b/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp @@ -12,11 +12,11 @@ //===----------------------------------------------------------------------===// #include "DirectXTargetMachine.h" +#include "DXILWriter/DXILWriterPass.h" #include "DirectX.h" #include "DirectXSubtarget.h" #include "DirectXTargetTransformInfo.h" #include "TargetInfo/DirectXTargetInfo.h" -#include "llvm/Bitcode/BitcodeWriterPass.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/TargetPassConfig.h" #include "llvm/IR/IRPrintingPasses.h" @@ -90,8 +90,8 @@ PM.add(createPrintModulePass(Out, "", true)); break; case CGFT_ObjectFile: - // TODO: Write DXIL instead of bitcode - PM.add(createBitcodeWriterPass(Out, true, false, false)); + // TODO: Use MC Object streamer to write DXContainer + PM.add(createDXILWriterPass(Out)); break; case CGFT_Null: break; diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt --- a/llvm/test/CMakeLists.txt +++ b/llvm/test/CMakeLists.txt @@ -20,6 +20,7 @@ LLVM_INLINER_MODEL_AUTOGENERATED LLVM_RAEVICT_MODEL_AUTOGENERATED LLVM_ENABLE_EXPENSIVE_CHECKS + LLVM_INCLUDE_DXIL_TESTS ) configure_lit_site_cfg( @@ -211,6 +212,10 @@ ) endif() +if (LLVM_INCLUDE_DXIL_TESTS) + list(APPEND LLVM_TEST_DEPENDS dxil-dis) +endif() + add_custom_target(llvm-test-depends DEPENDS ${LLVM_TEST_DEPENDS}) set_target_properties(llvm-test-depends PROPERTIES FOLDER "Tests") diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py --- a/llvm/test/lit.cfg.py +++ 
b/llvm/test/lit.cfg.py @@ -189,7 +189,8 @@ ToolSubst('OrcV2CBindingsRemovableCode', unresolved='ignore'), ToolSubst('OrcV2CBindingsReflectProcessSymbols', unresolved='ignore'), ToolSubst('OrcV2CBindingsLazy', unresolved='ignore'), - ToolSubst('OrcV2CBindingsVeryLazy', unresolved='ignore')]) + ToolSubst('OrcV2CBindingsVeryLazy', unresolved='ignore'), + ToolSubst('dxil-dis', unresolved='ignore')]) llvm_config.add_tool_substitutions(tools, config.llvm_tools_dir) diff --git a/llvm/test/lit.site.cfg.py.in b/llvm/test/lit.site.cfg.py.in --- a/llvm/test/lit.site.cfg.py.in +++ b/llvm/test/lit.site.cfg.py.in @@ -57,6 +57,7 @@ config.llvm_inliner_model_autogenerated = @LLVM_INLINER_MODEL_AUTOGENERATED@ config.llvm_raevict_model_autogenerated = @LLVM_RAEVICT_MODEL_AUTOGENERATED@ config.expensive_checks = @LLVM_ENABLE_EXPENSIVE_CHECKS@ +config.dxil_tests = @LLVM_INCLUDE_DXIL_TESTS@ import lit.llvm lit.llvm.initialize(lit_config, config) diff --git a/llvm/test/tools/dxil-dis/BasicIR.ll b/llvm/test/tools/dxil-dis/BasicIR.ll new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/BasicIR.ll @@ -0,0 +1,15 @@ +; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s + +; CHECK: define i32 @foo(i32 %X, i32 %Y) { +; CHECK: %Z = sub i32 %X, %Y +; CHECK: %Q = add i32 %Z, %Y +; CHECK: ret i32 %Q +; CHECK: } + +target triple = "dxil-unknown-unknown" + +define i32 @foo(i32 %X, i32 %Y) { + %Z = sub i32 %X, %Y + %Q = add i32 %Z, %Y + ret i32 %Q +} diff --git a/llvm/test/tools/dxil-dis/attribute-filter.ll b/llvm/test/tools/dxil-dis/attribute-filter.ll new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/attribute-filter.ll @@ -0,0 +1,15 @@ +; RUN: llc %s --filetype=obj -o - | dxil-dis -o - | FileCheck %s + +; CHECK: target triple = "dxil-unknown-unknown" +target triple = "dxil-unknown-unknown" + +; CHECK: Function Attrs: nounwind readnone +; Function Attrs: norecurse nounwind readnone willreturn +define float @fma(float %0, float %1, float %2) #0 { + %4 = fmul float %0, %1 + %5 = fadd float %4, %2 + ret float %5 +} + +; CHECK: attributes #0 = { nounwind readnone "disable-tail-calls"="false" } +attributes #0 = { norecurse nounwind readnone willreturn "disable-tail-calls"="false" } diff --git a/llvm/test/tools/dxil-dis/debug-info.ll b/llvm/test/tools/dxil-dis/debug-info.ll new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/debug-info.ll @@ -0,0 +1,66 @@ +; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s +target triple = "dxil-unknown-unknown" +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" + +; CHECK: define float @fma(float, float, float) unnamed_addr #0 !dbg !6 +; Function Attrs: norecurse nounwind readnone willreturn +define dso_local float @fma(float %0, float %1, float %2) local_unnamed_addr #0 !dbg !6 { +; CHECK-NEXT: call void @llvm.dbg.value(metadata float %0, metadata !11, metadata !14), !dbg !15 +; CHECK-NEXT: call void @llvm.dbg.value(metadata float %1, metadata !12, metadata !14), !dbg !15 +; CHECK-NEXT: call void @llvm.dbg.value(metadata float %2, metadata !13, metadata !14), !dbg !15 + call void @llvm.dbg.value(metadata float %0, metadata !11, metadata !DIExpression()), !dbg !14 + call void @llvm.dbg.value(metadata float %1, metadata !12, metadata !DIExpression()), !dbg !14 + call void @llvm.dbg.value(metadata float %2, metadata !13, metadata !DIExpression()), !dbg !14 +; CHECK-NEXT: %4 = fmul float %0, %1, !dbg !16 +; CHECK-NEXT: %5 = fadd float %4, %2, !dbg !17 + %4 = fmul float %0, %1, !dbg !15 + %5 = fadd float 
%4, %2, !dbg !16 + ret float %5, !dbg !17 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare void @llvm.dbg.value(metadata, metadata, metadata) #1 + +attributes #0 = { norecurse nounwind readnone willreturn } +attributes #1 = { nofree nosync nounwind readnone speculatable willreturn } + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4} +!llvm.ident = !{!5} + +; Other tests verify that we come back with reasonable structure for the debug +; info types, this test just needs to ensure they are there. +; The patch this is paired with fixes a bug where function debug info wasn't +; being emitted correctly even though other tests verified the MD would be +; emitted if it was referenced as module metadata. + +; CHECK: !0 = distinct !DICompileUnit +; CHECK-NEXT: !1 = !DIFile(filename: +; CHECK: !6 = distinct !DISubprogram(name: "fma", +; CHECK: !11 = !DILocalVariable(tag: +; CHECK-NEXT: !12 = !DILocalVariable(tag: +; CHECK-NEXT: !13 = !DILocalVariable(tag: +; CHECK-NEXT: !14 = !DIExpression() +; CHECK-NEXT: !15 = !DILocation(line: +; CHECK-NEXT: !16 = !DILocation(line: +; CHECK-NEXT: !17 = !DILocation(line: +; CHECK-NEXT: !18 = !DILocation(line: + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, splitDebugInlining: false, nameTableKind: None) +!1 = !DIFile(filename: "in.c", directory: "dir") +!2 = !{} +!3 = !{i32 7, !"Dwarf Version", i32 2} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!5 = !{!"Some Compiler"} +!6 = distinct !DISubprogram(name: "fma", scope: !1, file: !1, line: 1, type: !7, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !10) +!7 = !DISubroutineType(types: !8) +!8 = !{!9, !9, !9, !9} +!9 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float) +!10 = !{!11, !12, !13} +!11 = !DILocalVariable(name: "x", arg: 1, scope: !6, file: !1, line: 1, type: !9) +!12 = !DILocalVariable(name: "y", arg: 2, scope: !6, file: !1, line: 1, type: !9) +!13 = !DILocalVariable(name: "z", arg: 3, scope: !6, file: !1, line: 1, type: !9) +!14 = !DILocation(line: 0, scope: !6) +!15 = !DILocation(line: 2, column: 12, scope: !6) +!16 = !DILocation(line: 2, column: 16, scope: !6) +!17 = !DILocation(line: 2, column: 3, scope: !6) diff --git a/llvm/test/tools/dxil-dis/di-compile-unit.ll b/llvm/test/tools/dxil-dis/di-compile-unit.ll new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/di-compile-unit.ll @@ -0,0 +1,17 @@ +; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s +target triple = "dxil-unknown-unknown" + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Some Compiler", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, splitDebugInlining: false, nameTableKind: None) +!1 = !DIFile(filename: "di-compile-unit.src", directory: "/some-path") +!2 = !{} +!3 = !{i32 7, !"Dwarf Version", i32 2} +!4 = !{i32 2, !"Debug Info Version", i32 3} + +; CHECK: !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Some Compiler", isOptimized: true, runtimeVersion: 0, emissionKind: 1, enums: !2) +; CHECK: !1 = !DIFile(filename: "di-compile-unit.src", directory: "/some-path") +; CHECK: !2 = !{} +; CHECK: !3 = !{i32 7, !"Dwarf Version", i32 2} +; CHECK: !4 = !{i32 2, !"Debug Info Version", i32 3} diff --git a/llvm/test/tools/dxil-dis/di-subprogram.ll 
b/llvm/test/tools/dxil-dis/di-subprogram.ll new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/di-subprogram.ll @@ -0,0 +1,53 @@ +; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s +target triple = "dxil-unknown-unknown" + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4} +!llvm.used = !{!5} +!llvm.lines = !{!13, !14, !15, !16} + +; CHECK: !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Some Compiler", isOptimized: true, runtimeVersion: 0, emissionKind: 1, enums: !2) +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Some Compiler", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, splitDebugInlining: false, nameTableKind: None) +; CHECK: !1 = !DIFile(filename: "some-source", directory: "some-path") +!1 = !DIFile(filename: "some-source", directory: "some-path") +!2 = !{} + +; CHECK: !3 = !{i32 7, !"Dwarf Version", i32 2} +!3 = !{i32 7, !"Dwarf Version", i32 2} +; CHECK: !4 = !{i32 2, !"Debug Info Version", i32 3} +!4 = !{i32 2, !"Debug Info Version", i32 3} + +; CHECK: !5 = distinct !DISubprogram(name: "fma", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, function: !0, variables: !9) +!5 = distinct !DISubprogram(name: "fma", scope: !1, file: !1, line: 1, type: !6, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !9) + +; CHECK: !6 = !DISubroutineType(types: !7) +!6 = !DISubroutineType(types: !7) + +; CHECK: !7 = !{!8, !8, !8, !8} +!7 = !{!8, !8, !8, !8} + +; CHECK: !8 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float) +!8 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float) + +; CHECK: !9 = !{!10, !11, !12} +!9 = !{!10, !11, !12} + +; CHECK: !10 = !DILocalVariable(tag: DW_TAG_variable, name: "x", arg: 1, scope: !5, file: !1, line: 1, type: !8) +!10 = !DILocalVariable(name: "x", arg: 1, scope: !5, file: !1, line: 1, type: !8) + +; CHECK: !11 = !DILocalVariable(tag: DW_TAG_variable, name: "y", arg: 2, scope: !5, file: !1, line: 1, type: !8) +!11 = !DILocalVariable(name: "y", arg: 2, scope: !5, file: !1, line: 1, type: !8) + +; CHECK: !12 = !DILocalVariable(tag: DW_TAG_variable, name: "z", arg: 3, scope: !5, file: !1, line: 1, type: !8) +!12 = !DILocalVariable(name: "z", arg: 3, scope: !5, file: !1, line: 1, type: !8) + + +; CHECK: !13 = !DILocation(line: 0, scope: !5) +; CHECK: !14 = !DILocation(line: 2, column: 12, scope: !5) +; CHECK: !15 = !DILocation(line: 2, column: 16, scope: !5) +; CHECK: !16 = !DILocation(line: 2, column: 3, scope: !5) + +!13 = !DILocation(line: 0, scope: !5) +!14 = !DILocation(line: 2, column: 12, scope: !5) +!15 = !DILocation(line: 2, column: 16, scope: !5) +!16 = !DILocation(line: 2, column: 3, scope: !5) diff --git a/llvm/test/tools/dxil-dis/di-subrotine.ll b/llvm/test/tools/dxil-dis/di-subrotine.ll new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/di-subrotine.ll @@ -0,0 +1,12 @@ +; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s +target triple = "dxil-unknown-unknown" + +!llvm.used = !{!0} + +!0 = !DISubroutineType(types: !1) +!1 = !{!2, !2, !2, !2} +!2 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float) + +; CHECK: !0 = !DISubroutineType(types: !1) +; CHECK: !1 = !{!2, !2, !2, !2} +; CHECK: !2 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float) diff --git a/llvm/test/tools/dxil-dis/lit.local.cfg 
b/llvm/test/tools/dxil-dis/lit.local.cfg new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/lit.local.cfg @@ -0,0 +1,3 @@ +if not config.dxil_tests: + config.unsupported = True +config.suffixes = ['.ll'] diff --git a/llvm/test/tools/dxil-dis/metadata.ll b/llvm/test/tools/dxil-dis/metadata.ll new file mode 100644 --- /dev/null +++ b/llvm/test/tools/dxil-dis/metadata.ll @@ -0,0 +1,13 @@ +; RUN: llc --filetype=obj %s -o - | dxil-dis +target triple = "dxil-unknown-unknown" + +!llvm.foo = !{!0} +!llvm.bar = !{!1} + +!0 = !{i32 42} +!1 = !{!"Some MDString"} + +; CHECK: !llvm.foo = !{!0} +; CHECK: !llvm.bar = !{!1} +; CHECK: !0 = !{i32 42} +; CHECK: !1 = !{!"Some MDString"} diff --git a/llvm/tools/dxil-dis/CMakeLists.txt b/llvm/tools/dxil-dis/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/llvm/tools/dxil-dis/CMakeLists.txt @@ -0,0 +1,48 @@ +option(LLVM_INCLUDE_DXIL_TESTS "Include DXIL tests" Off) +mark_as_advanced(LLVM_INCLUDE_DXIL_TESTS) + +if (NOT LLVM_INCLUDE_DXIL_TESTS) + return() +endif () + +if (NOT "DirectX" IN_LIST LLVM_TARGETS_TO_BUILD) + message(FATAL_ERROR "Building dxil-dis tests is unsupported without the DirectX target") +endif () + +if (CMAKE_HOST_UNIX) + set(LLVM_LINK_OR_COPY create_symlink) +else () + set(LLVM_LINK_OR_COPY copy) +endif () + +if (DXIL_DIS) + add_custom_target(dxil-dis + COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${DXIL_DIS}" "${LLVM_RUNTIME_OUTPUT_INTDIR}/dxil-dis") + return() +endif () + +include(ExternalProject) + +set(SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}/DXC-src) +set(BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/DXC-bins) +set(GIT_SETTINGS GIT_REPOSITORY https://github.com/microsoft/DirectXShaderCompiler.git) + +if (DXC_SOURCE_DIR) + set(SOURCE_DIR ${DXC_SOURCE_DIR}) + unset(GIT_SETTINGS) +endif () + +ExternalProject_Add(DXC + ${GIT_SETTINGS} + SOURCE_DIR ${SOURCE_DIR} + BINARY_DIR ${BINARY_DIR} + CMAKE_ARGS -C ${SOURCE_DIR}/cmake/caches/PredefinedParams.cmake -DLLVM_INCLUDE_TESTS=On + BUILD_COMMAND ${CMAKE_COMMAND} --build ${BINARY_DIR} --target llvm-dis + BUILD_BYPRODUCTS ${BINARY_DIR}/bin/llvm-dis + INSTALL_COMMAND "" + ) + +add_custom_target(dxil-dis + COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${BINARY_DIR}/bin/llvm-dis${CMAKE_EXECUTABLE_SUFFIX}" "${LLVM_RUNTIME_OUTPUT_INTDIR}/dxil-dis${CMAKE_EXECUTABLE_SUFFIX}" + DEPENDS DXC + )
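
For anyone wanting to exercise the new writer outside of llc, the sketch below shows one way to drive it. It is a minimal sketch, not part of this patch: the helper emitDXIL and its file handling are hypothetical, and it assumes only what the patch provides, namely createDXILWriterPass() from DXILWriter/DXILWriterPass.h, which schedules dxil::WriteDXILToFile() on the module. The in-tree path is the one the new tests use: llc --filetype=obj on a dxil-unknown-unknown module, piped into dxil-dis.

// Hypothetical standalone driver (not part of this patch) -- a minimal sketch
// of running the DXIL writer pass through the legacy pass manager, mirroring
// how DirectXTargetMachine::addPassesToEmitFile wires it up for CGFT_ObjectFile.
#include "DXILWriter/DXILWriterPass.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

using namespace llvm;

static bool emitDXIL(Module &M, StringRef Path) {
  std::error_code EC;
  raw_fd_ostream Out(Path, EC, sys::fs::OF_None); // DXIL is a binary format
  if (EC)
    return false;

  legacy::PassManager PM;
  PM.add(createDXILWriterPass(Out)); // legacy-PM pass added by this patch
  PM.run(M);                         // serializes M as DXIL bitcode into Out
  return true;
}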