Index: lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
+++ lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
@@ -164,7 +164,7 @@
   // Look for register defs and register masks. Register masks are
   // typically on calls and they clobber everything not in the mask.
   for (const MachineOperand &MO : MI.operands()) {
-    if (MO.isReg() && MO.isDef() && MO.getReg()) {
+    if (MO.isReg() && MO.isDef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
       for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid();
            ++AI)
         Regs.set(*AI);
@@ -191,7 +191,7 @@
     // Not a DBG_VALUE instruction. It may clobber registers which describe
     // some variables.
     for (const MachineOperand &MO : MI.operands()) {
-      if (MO.isReg() && MO.isDef() && MO.getReg()) {
+      if (MO.isReg() && MO.isDef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
         // If this is a register def operand, it may end a debug value
         // range.
         for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid();
@@ -238,7 +238,7 @@
     if (!MBB.empty() && &MBB != &MF->back()) {
       for (auto I = RegVars.begin(), E = RegVars.end(); I != E;) {
         auto CurElem = I++; // CurElem can be erased below.
-        if (ChangingRegs.test(CurElem->first))
+        if (TargetRegisterInfo::isPhysicalRegister(CurElem->first) && ChangingRegs.test(CurElem->first))
           clobberRegisterUses(RegVars, CurElem, Result, MBB.back());
       }
     }
Index: lib/CodeGen/LiveIntervalAnalysis.cpp
===================================================================
--- lib/CodeGen/LiveIntervalAnalysis.cpp
+++ lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -1322,6 +1322,8 @@
         continue;
       const MachineInstr &MI = *MO.getParent();
+      if (MI.isDebugValue())
+        continue;
       SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
       if (InstSlot > LastUse && InstSlot < OldIdx)
         LastUse = InstSlot.getRegSlot();
     }
Index: lib/CodeGen/LiveRangeCalc.cpp
===================================================================
--- lib/CodeGen/LiveRangeCalc.cpp
+++ lib/CodeGen/LiveRangeCalc.cpp
@@ -182,6 +182,9 @@

     // Determine the actual place of the use.
     const MachineInstr *MI = MO.getParent();
+    if (TargetRegisterInfo::isVirtualRegister(Reg) && MI->isDebugValue())
+      continue;
+
     unsigned OpNo = (&MO - &MI->getOperand(0));
     SlotIndex UseIdx;
     if (MI->isPHI()) {
Index: lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
===================================================================
--- lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -59,6 +59,7 @@
   DotResult = UINT64_MAX - 1,  ///< .result
   DotLocal = UINT64_MAX - 2,   ///< .local
   DotEndFunc = UINT64_MAX - 3, ///< .endfunc
+  DotIndIdx = UINT64_MAX - 4,  ///< .indidx
 };

 } // end namespace WebAssembly
Index: lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
===================================================================
--- lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
+++ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
@@ -43,6 +43,8 @@
                                         size_t NumResults) {
     llvm_unreachable("emitIndirectFunctionType not implemented");
   }
+  /// .indidx
+  virtual void emitIndIdx(const MCExpr *Value) = 0;
 };

 /// This part is for ascii assembly output
@@ -59,6 +61,7 @@
   void emitIndirectFunctionType(StringRef name,
                                 SmallVectorImpl<MVT> &SignatureVTs,
                                 size_t NumResults) override;
+  void emitIndIdx(const MCExpr *Value) override;
 };

 /// This part is for ELF object output
@@ -70,6 +73,7 @@
   void emitResult(ArrayRef<MVT> Types) override;
   void emitLocal(ArrayRef<MVT> Types) override;
   void emitEndFunc() override;
+  void emitIndIdx(const MCExpr *Value) override;
 };

 } // end namespace llvm
Index: lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
===================================================================
--- lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
+++ lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
@@ -74,6 +74,10 @@
   OS << "\n";
 }

+void WebAssemblyTargetAsmStreamer::emitIndIdx(const MCExpr *Value) {
+  OS << "\t.indidx \t" << *Value << '\n';
+}
+
 // FIXME: What follows is not the real binary encoding.
 static void EncodeTypes(MCStreamer &Streamer,
                         ArrayRef<MVT> Types) {
@@ -100,3 +104,8 @@
 void WebAssemblyTargetELFStreamer::emitEndFunc() {
   Streamer.EmitIntValue(WebAssembly::DotEndFunc, sizeof(uint64_t));
 }
+
+void WebAssemblyTargetELFStreamer::emitIndIdx(const MCExpr *Value) {
+  Streamer.EmitIntValue(WebAssembly::DotIndIdx, sizeof(uint64_t));
+  Streamer.EmitValue(Value, sizeof(uint64_t));
+}
Index: lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
===================================================================
--- lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
+++ lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -183,6 +183,15 @@
   SmallVector<MVT, 4> ResultVTs;
   const Function &F(*MF->getFunction());
+
+  // Emit the function index.
+  MDNode *Idx;
+  if ((Idx = F.getMetadata("wasm.index"))) {
+    assert(Idx->getNumOperands() == 1);
+
+    getTargetStreamer()->emitIndIdx(AsmPrinter::lowerConstant(cast<ConstantAsMetadata>(Idx->getOperand(0))->getValue()));
+  }
+
   ComputeLegalValueVTs(F, TM, F.getReturnType(), ResultVTs);

   // If the return type needs to be legalized it will get converted into
Index: lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
===================================================================
--- lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -258,6 +258,9 @@
                   LIS.getInstructionIndex(*Def).getRegSlot());
   assert(DefVNI);
   for (auto I : MRI.use_nodbg_operands(Reg)) {
+    if (TargetRegisterInfo::isVirtualRegister(Reg) && I.getParent()->isDebugValue())
+      continue;
+
     const auto &Result = LI.Query(LIS.getInstructionIndex(*I.getParent()));
     if (Result.valueIn() == DefVNI) {
       if (!Result.isKill())
Index: lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
===================================================================
--- lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
+++ lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
@@ -98,6 +98,9 @@
     if (&MI == Where || !MDT.dominates(&MI, Where))
       continue;

+    if (Where->isDebugValue())
+      continue;
+
     // If this use gets a different value, skip it.
     SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
     VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
Index: lib/Transforms/IPO/LowerTypeTests.cpp
===================================================================
--- lib/Transforms/IPO/LowerTypeTests.cpp
+++ lib/Transforms/IPO/LowerTypeTests.cpp
@@ -250,6 +250,10 @@
   void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
   void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
                                  ArrayRef<Function *> Functions);
+  void buildBitSetsFromFunctionsX86(ArrayRef<Metadata *> TypeIds,
+                                    ArrayRef<Function *> Functions);
+  void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
+                                     ArrayRef<Function *> Functions);
   void buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
                                    ArrayRef<GlobalObject *> Globals);
   bool lower();
@@ -635,9 +639,6 @@
 static const unsigned kX86JumpTableEntrySize = 8;

 unsigned LowerTypeTests::getJumpTableEntrySize() {
-  if (Arch != Triple::x86 && Arch != Triple::x86_64)
-    report_fatal_error("Unsupported architecture for jump tables");
-
   return kX86JumpTableEntrySize;
 }

@@ -648,9 +649,6 @@
 Constant *LowerTypeTests::createJumpTableEntry(GlobalObject *Src,
                                                Function *Dest,
                                                unsigned Distance) {
-  if (Arch != Triple::x86 && Arch != Triple::x86_64)
-    report_fatal_error("Unsupported architecture for jump tables");
-
   const unsigned kJmpPCRel32Code = 0xe9;
   const unsigned kInt3Code = 0xcc;

@@ -675,9 +673,6 @@
 }

 Type *LowerTypeTests::getJumpTableEntryType() {
-  if (Arch != Triple::x86 && Arch != Triple::x86_64)
-    report_fatal_error("Unsupported architecture for jump tables");
-
   return StructType::get(M->getContext(),
                          {Int8Ty, Int32Ty, Int8Ty, Int8Ty, Int8Ty},
                          /*Packed=*/true);
@@ -687,6 +682,17 @@
 /// for the functions, build the bit sets and lower the llvm.type.test calls.
 void LowerTypeTests::buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
                                                ArrayRef<Function *> Functions) {
+  if (Arch == Triple::x86 || Arch == Triple::x86_64) {
+    buildBitSetsFromFunctionsX86(TypeIds, Functions);
+  } else if (Arch == Triple::wasm32 || Arch == Triple::wasm64) {
+    buildBitSetsFromFunctionsWASM(TypeIds, Functions);
+  } else {
+    report_fatal_error("Unsupported architecture for jump tables");
+  }
+}
+
+void LowerTypeTests::buildBitSetsFromFunctionsX86(
+    ArrayRef<Metadata *> TypeIds, ArrayRef<Function *> Functions) {
   // Unlike the global bitset builder, the function bitset builder cannot
   // re-arrange functions in a particular order and base its calculations on the
   // layout of the functions' entry points, as we have no idea how large a
@@ -818,6 +824,29 @@
       ConstantArray::get(JumpTableType, JumpTableEntries));
 }

+// Assign a dummy layout using an incrementing counter, and store this as
+// metadata. During generation of the indexed indirect function call table, the
+// backend will ensure that the appropriate indexes are used.
+void LowerTypeTests::buildBitSetsFromFunctionsWASM(
+    ArrayRef<Metadata *> TypeIds, ArrayRef<Function *> Functions) {
+  static uint64_t counter = 0;
+  assert(!Functions.empty());
+
+  // Build a dummy layout.
+  DenseMap<GlobalObject *, uint64_t> GlobalLayout;
+  for (Function *F : Functions)
+    GlobalLayout[F] = counter++;
+
+  lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(cast<PointerType>(Int32PtrTy)), GlobalLayout);
+
+  // Generate metadata for the indirect function indexes.
+  for (const auto &P : GlobalLayout) {
+    MDNode *MD = MDNode::get(P.first->getContext(),
+                             ArrayRef<Metadata *>(ConstantAsMetadata::get(ConstantInt::get(Int64Ty, P.second))));
+    cast<Function>(P.first)->setMetadata("wasm.index", MD);
+  }
+}
+
 void LowerTypeTests::buildBitSetsFromDisjointSet(
     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalObject *> Globals) {
   llvm::DenseMap<Metadata *, unsigned> TypeIdIndices;