diff --git a/llvm/include/llvm/CodeGen/FastISel.h b/llvm/include/llvm/CodeGen/FastISel.h
--- a/llvm/include/llvm/CodeGen/FastISel.h
+++ b/llvm/include/llvm/CodeGen/FastISel.h
@@ -18,6 +18,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/CallingConv.h"
@@ -204,7 +205,7 @@
   MachineRegisterInfo &MRI;
   MachineFrameInfo &MFI;
   MachineConstantPool &MCP;
-  DebugLoc DbgLoc;
+  MIMetadata MIMD;
   const TargetMachine &TM;
   const DataLayout &DL;
   const TargetInstrInfo &TII;
@@ -247,7 +248,7 @@
   void finishBasicBlock();

   /// Return current debug location information.
-  DebugLoc getCurDebugLoc() const { return DbgLoc; }
+  DebugLoc getCurDebugLoc() const { return MIMD.getDL(); }

   /// Do "fast" instruction selection for function arguments and append
   /// the machine instructions to the current block. Returns true when
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -319,7 +319,7 @@
     Reg = lookUpRegForValue(Op);
   } else if (isa<UndefValue>(V)) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
   }
   return Reg;
@@ -696,20 +696,20 @@
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
   auto Builder =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
   const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
   for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
     Builder.addImm(0);

   // Issue STACKMAP.
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(TargetOpcode::STACKMAP));
   for (auto const &MO : Ops)
     MIB.add(MO);

   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
       .addImm(0)
       .addImm(0);
@@ -878,7 +878,7 @@
                                             /*isImp=*/true));

   // Insert the patchpoint instruction before the call generated by the target.
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                     TII.get(TargetOpcode::PATCHPOINT));

   for (auto &MO : Ops)
@@ -907,7 +907,7 @@
   Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                           /*isDef=*/false));
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
   for (auto &MO : Ops)
     MIB.add(MO);
@@ -928,7 +928,7 @@
   Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                           /*isDef=*/false));
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
   for (auto &MO : Ops)
     MIB.add(MO);
@@ -1170,7 +1170,7 @@
     ExtraInfo |= InlineAsm::Extra_IsConvergent;
   ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(TargetOpcode::INLINEASM));
   MIB.addExternalSymbol(IA->getAsmString().c_str());
   MIB.addImm(ExtraInfo);
@@ -1250,12 +1250,12 @@
                                      false);

   if (Op) {
-    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+    assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
            "Expected inlined-at fields to agree");
     // A dbg.declare describes the address of a source variable, so lower it
     // into an indirect DBG_VALUE.
     auto Builder =
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
                 TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
                 DI->getVariable(), DI->getExpression());
@@ -1282,12 +1282,12 @@
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
-    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+    assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
           "Expected inlined-at fields to agree");
    if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
      // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
      // undef DBG_VALUE to terminate any prior location.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      // See if there's an expression to constant-fold.
@@ -1295,19 +1295,19 @@
     if (Expr)
       std::tie(Expr, CI) = Expr->constantFold(CI);
     if (CI->getBitWidth() > 64)
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
           .addCImm(CI)
           .addImm(0U)
           .addMetadata(DI->getVariable())
           .addMetadata(Expr);
     else
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
           .addImm(CI->getZExtValue())
           .addImm(0U)
           .addMetadata(DI->getVariable())
           .addMetadata(Expr);
   } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addFPImm(CF)
         .addImm(0U)
         .addMetadata(DI->getVariable())
@@ -1316,8 +1316,8 @@
     // FIXME: This does not handle register-indirect values at offset 0.
     bool IsIndirect = false;
     auto Builder =
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
-                DI->getVariable(), DI->getExpression());
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II,
+                IsIndirect, Reg, DI->getVariable(), DI->getExpression());

     // If using instruction referencing, mutate this into a DBG_INSTR_REF,
     // to be later patched up by finalizeDebugInstrRefs.
@@ -1339,7 +1339,7 @@
     return true;
   }

-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
           TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
   return true;
 }
@@ -1447,7 +1447,7 @@
   MVT Ty = ETy.getSimpleVT();
   const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
   Register ResultReg = createResultReg(TyRegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
           TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

   updateValueMap(I, ResultReg);
@@ -1499,7 +1499,7 @@
       if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
         return false;

-  DbgLoc = I->getDebugLoc();
+  MIMD = MIMetadata(*I);

   SavedInsertPt = FuncInfo.InsertPt;
@@ -1524,7 +1524,7 @@
   if (!SkipTargetIndependentISel) {
     if (selectOperator(I, I->getOpcode())) {
       ++NumFastIselSuccessIndependent;
-      DbgLoc = DebugLoc();
+      MIMD = {};
       return true;
     }
     // Remove dead code.
@@ -1536,7 +1536,7 @@
   // Next, try calling the target to attempt to handle the instruction.
   if (fastSelectInstruction(I)) {
     ++NumFastIselSuccessTarget;
-    DbgLoc = DebugLoc();
+    MIMD = {};
     return true;
   }
   // Remove dead code.
@@ -1544,7 +1544,7 @@
   if (SavedInsertPt != FuncInfo.InsertPt)
     removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

-  DbgLoc = DebugLoc();
+  MIMD = {};
   // Undo phi node updates, because they will be added again by SelectionDAG.
   if (I->isTerminator()) {
     // PHI node handling may have generated local value instructions.
@@ -1592,7 +1592,7 @@
     FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
   }

-  fastEmitBranch(FalseMBB, DbgLoc);
+  fastEmitBranch(FalseMBB, MIMD.getDL());
 }

 /// Emit an FNeg operation.
@@ -1905,7 +1905,7 @@
     // If it's not legal to COPY between the register classes, something
     // has gone very wrong before we got here.
     Register NewOp = createResultReg(RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
     return NewOp;
   }
@@ -1918,7 +1918,7 @@
   Register ResultReg = createResultReg(RC);
   const MCInstrDesc &II = TII.get(MachineInstOpcode);

-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
   return ResultReg;
 }
@@ -1930,12 +1930,12 @@
   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
@@ -1952,14 +1952,14 @@
   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addReg(Op1);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
         .addReg(Op1);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -1976,16 +1976,16 @@
   Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addReg(Op1)
         .addReg(Op2);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
         .addReg(Op1)
         .addReg(Op2);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2000,14 +2000,14 @@
   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
         .addImm(Imm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2022,16 +2022,16 @@
   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addImm(Imm1)
         .addImm(Imm2);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
         .addImm(Imm1)
         .addImm(Imm2);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2045,12 +2045,12 @@
   Register ResultReg = createResultReg(RC);
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addFPImm(FPImm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addFPImm(FPImm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2066,16 +2066,16 @@
   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addReg(Op0)
         .addReg(Op1)
         .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
         .addReg(Op0)
         .addReg(Op1)
         .addImm(Imm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2087,11 +2087,11 @@
   const MCInstrDesc &II = TII.get(MachineInstOpcode);

   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
         .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
   return ResultReg;
@@ -2104,7 +2104,7 @@
          "Cannot yet extract from physregs");
   const TargetRegisterClass *RC = MRI.getRegClass(Op0);
   MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
           ResultReg).addReg(Op0, 0, Idx);
   return ResultReg;
 }
@@ -2169,9 +2169,9 @@
       // Set the DebugLoc for the copy. Use the location of the operand if
       // there is one; otherwise no location, flushLocalValueMap will fix it.
-      DbgLoc = DebugLoc();
+      MIMD = {};
       if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
-        DbgLoc = Inst->getDebugLoc();
+        MIMD = MIMetadata(*Inst);

       Register Reg = getRegForValue(PHIOp);
       if (!Reg) {
@@ -2179,7 +2179,7 @@
         return false;
       }
       FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
-      DbgLoc = DebugLoc();
+      MIMD = {};
     }
   }
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -356,7 +356,7 @@
   if (SI != FuncInfo.StaticAllocaMap.end()) {
     Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
             ResultReg)
         .addFrameIndex(SI->second)
         .addImm(0)
@@ -379,7 +379,7 @@
                                                    : &AArch64::GPR32RegClass;
   unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
   Register ResultReg = createResultReg(RC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
           ResultReg).addReg(ZeroReg, getKillRegState(true));
   return ResultReg;
 }
@@ -411,11 +411,11 @@
         &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
     Register TmpReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc1), TmpReg)
         .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());

     Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(TmpReg, getKillRegState(true));

@@ -428,12 +428,12 @@
   unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
   Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
           ADRPReg).addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGE);

   unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
   Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
       .addReg(ADRPReg)
       .addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
   return ResultReg;
@@ -460,7 +460,7 @@
   if (OpFlags & AArch64II::MO_GOT) {
     // ADRP + LDRX
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
             ADRPReg)
         .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);

@@ -472,7 +472,7 @@
       ResultReg = createResultReg(&AArch64::GPR64RegClass);
       LdrOpc = AArch64::LDRXui;
     }
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(LdrOpc),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(LdrOpc),
             ResultReg)
         .addReg(ADRPReg)
         .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
@@ -483,7 +483,7 @@
       // LDRWui produces a 32-bit register, but pointers in-register are 64-bits
       // so we must extend the result on ILP32.
       Register Result64 = createResultReg(&AArch64::GPR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::SUBREG_TO_REG))
           .addDef(Result64)
           .addImm(0)
@@ -492,12 +492,12 @@
       return Result64;
   } else {
     // ADRP + ADDX
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
             ADRPReg)
         .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);

     ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
             ResultReg)
         .addReg(ADRPReg)
         .addGlobalAddress(GV, 0,
@@ -1035,7 +1035,7 @@
   if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())
   {
     Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
             ResultReg)
         .addFrameIndex(Addr.getFI())
         .addImm(0)
@@ -1308,7 +1308,7 @@
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
       .addReg(RHSReg);
   return ResultReg;
@@ -1352,7 +1352,7 @@
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
       .addImm(Imm)
       .addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));
@@ -1394,7 +1394,7 @@
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
       .addReg(RHSReg)
       .addImm(getShifterImm(ShiftType, ShiftImm));
@@ -1438,7 +1438,7 @@
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
       .addReg(LHSReg)
       .addReg(RHSReg)
       .addImm(getArithExtendImm(ExtType, ShiftImm));
@@ -1495,7 +1495,7 @@
   if (UseImm) {
     unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
         .addReg(LHSReg);
     return true;
   }
@@ -1505,7 +1505,7 @@
     return false;

   unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
       .addReg(LHSReg)
       .addReg(RHSReg);
   return true;
@@ -1842,7 +1842,7 @@
   // Create the base instruction, then add the operands.
   Register ResultReg = createResultReg(RC);
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(Opc), ResultReg);
   addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);
@@ -1857,7 +1857,7 @@
   // the 32bit reg to a 64bit reg.
   if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
     Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), Reg64)
         .addImm(0)
         .addReg(ResultReg, getKillRegState(true))
@@ -2048,7 +2048,7 @@
   const MCInstrDesc &II = TII.get(Opc);
   SrcReg = constrainOperandRegClass(II, SrcReg, 0);
   AddrReg = constrainOperandRegClass(II, AddrReg, 1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
       .addReg(SrcReg)
       .addReg(AddrReg)
       .addMemOperand(MMO);
@@ -2117,7 +2117,7 @@
   const MCInstrDesc &II = TII.get(Opc);
   SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(SrcReg);
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(SrcReg);
   addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);
   return true;
@@ -2352,7 +2352,7 @@
   // Emit the combined compare and branch instruction.
   SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
   MachineInstrBuilder MIB =
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
           .addReg(SrcReg);
   if (IsBitTest)
     MIB.addImm(TestBit);
@@ -2381,10 +2381,10 @@
     default:
       break;
     case CmpInst::FCMP_FALSE:
-      fastEmitBranch(FBB, DbgLoc);
+      fastEmitBranch(FBB, MIMD.getDL());
       return true;
     case CmpInst::FCMP_TRUE:
-      fastEmitBranch(TBB, DbgLoc);
+      fastEmitBranch(TBB, MIMD.getDL());
       return true;
     }
@@ -2422,13 +2422,13 @@
     // Emit the extra branch for FCMP_UEQ and FCMP_ONE.
     if (ExtraCC != AArch64CC::AL) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
          .addImm(ExtraCC)
          .addMBB(TBB);
     }

     // Emit the branch.
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
        .addImm(CC)
        .addMBB(TBB);

@@ -2438,7 +2438,7 @@
   } else if (const auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
     uint64_t Imm = CI->getZExtValue();
     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::B))
         .addMBB(Target);

     // Obtain the branch probability and add the target to the successor list.
@@ -2459,7 +2459,7 @@
     return false;

   // Emit the branch.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
       .addImm(CC)
       .addMBB(TBB);

@@ -2482,7 +2482,7 @@
   const MCInstrDesc &II = TII.get(Opcode);
   Register ConstrainedCondReg
       = constrainOperandRegClass(II, CondReg, II.getNumDefs());
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
       .addReg(ConstrainedCondReg)
      .addImm(0)
      .addMBB(TBB);
@@ -2500,7 +2500,7 @@
   // Emit the indirect branch.
   const MCInstrDesc &II = TII.get(AArch64::BR);
   AddrReg = constrainOperandRegClass(II, AddrReg, II.getNumDefs());
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(AddrReg);
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(AddrReg);

   // Make sure the CFG is up-to-date.
   for (const auto *Succ : BI->successors())
@@ -2524,7 +2524,7 @@
     break;
   case CmpInst::FCMP_FALSE:
     ResultReg = createResultReg(&AArch64::GPR32RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(AArch64::WZR, getKillRegState(true));
     break;
@@ -2564,12 +2564,12 @@
   if (CondCodes) {
     Register TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
             TmpReg1)
         .addReg(AArch64::WZR, getKillRegState(true))
         .addReg(AArch64::WZR, getKillRegState(true))
         .addImm(CondCodes[0]);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
             ResultReg)
         .addReg(TmpReg1, getKillRegState(true))
         .addReg(AArch64::WZR, getKillRegState(true))
@@ -2583,7 +2583,7 @@
   AArch64CC::CondCode CC = getCompareCC(Predicate);
   assert((CC != AArch64CC::AL) && "Unexpected condition code.");
   AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
           ResultReg)
       .addReg(AArch64::WZR, getKillRegState(true))
      .addReg(AArch64::WZR, getKillRegState(true))
@@ -2747,7 +2747,7 @@
     CondReg = constrainOperandRegClass(II, CondReg, 1);

     // Emit a TST instruction (ANDS wzr, reg, #imm).
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
             AArch64::WZR)
         .addReg(CondReg)
         .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
@@ -2777,7 +2777,7 @@
     return false;

   Register ResultReg = createResultReg(&AArch64::FPR64RegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTDSr),
           ResultReg).addReg(Op);
   updateValueMap(I, ResultReg);
   return true;
@@ -2793,7 +2793,7 @@
     return false;

   Register ResultReg = createResultReg(&AArch64::FPR32RegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTSDr),
           ResultReg).addReg(Op);
   updateValueMap(I, ResultReg);
   return true;
@@ -2827,7 +2827,7 @@
   }
   Register ResultReg = createResultReg(
       DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
       .addReg(SrcReg);
   updateValueMap(I, ResultReg);
   return true;
@@ -2980,7 +2980,7 @@
     // Without this, EmitLiveInCopies may eliminate the livein if its only
     // use is a bitcast (which isn't turned into an instruction).
     Register ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(DstReg, getKillRegState(true));
     updateValueMap(&Arg, ResultReg);
@@ -3001,7 +3001,7 @@
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))
       .addImm(NumBytes).addImm(0);

   // Process the args.
@@ -3041,7 +3041,7 @@
     // Now copy/store arg to correct locations.
     if (VA.isRegLoc() && !VA.needsCustom()) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
       CLI.OutRegs.push_back(VA.getLocReg());
     } else if (VA.needsCustom()) {
@@ -3084,7 +3084,7 @@
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
       .addImm(NumBytes).addImm(0);

   // Now the return value.
@@ -3105,7 +3105,7 @@
       return false;

     Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(RVLocs[0].getLocReg());
     CLI.InRegs.push_back(RVLocs[0].getLocReg());
@@ -3215,7 +3215,7 @@
   if (Subtarget->useSmallAddressing()) {
     const MCInstrDesc &II =
         TII.get(Addr.getReg() ? getBLRCallOpcode(*MF) : (unsigned)AArch64::BL);
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II);
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II);
     if (Symbol)
       MIB.addSym(Symbol, 0);
     else if (Addr.getGlobalValue())
@@ -3229,12 +3229,12 @@
     unsigned CallReg = 0;
     if (Symbol) {
       Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),
               ADRPReg)
           .addSym(Symbol, AArch64II::MO_GOT | AArch64II::MO_PAGE);

       CallReg = createResultReg(&AArch64::GPR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(AArch64::LDRXui), CallReg)
           .addReg(ADRPReg)
           .addSym(Symbol,
@@ -3249,7 +3249,7 @@
     const MCInstrDesc &II = TII.get(getBLRCallOpcode(*MF));
     CallReg = constrainOperandRegClass(II, CallReg, 0);
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(CallReg);
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(CallReg);
   }

   // Add implicit physical register uses to the call.
@@ -3426,7 +3426,7 @@
     const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
     Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
     Register SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), SrcReg).addReg(FramePtr);
     // Recursively load frame address
     // ldr x0, [fp]
@@ -3451,7 +3451,7 @@
     // SP = FP + Fixed Object + 16
     int FI = MFI.CreateFixedObject(4, 0, false);
     Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::ADDXri), ResultReg)
         .addFrameIndex(FI)
         .addImm(0)
@@ -3584,17 +3584,17 @@
     if (!SrcReg)
       return false;
     Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
         .addReg(SrcReg);
     updateValueMap(II, ResultReg);
     return true;
   }
   case Intrinsic::trap:
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::BRK))
        .addImm(1);
     return true;
   case Intrinsic::debugtrap:
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::BRK))
        .addImm(0xF000);
     return true;
@@ -3724,7 +3724,7 @@
     if (VT == MVT::i32) {
       MulReg = emitUMULL_rr(MVT::i64, LHSReg, RHSReg);
       // tst xreg, #0xffffffff00000000
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(AArch64::ANDSXri), AArch64::XZR)
           .addReg(MulReg)
           .addImm(AArch64_AM::encodeLogicalImmediate(0xFFFFFFFF00000000, 64));
@@ -3743,7 +3743,7 @@
   if (MulReg) {
     ResultReg1 = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
   }

@@ -3855,14 +3855,14 @@
       SrcReg = emitAnd_ri(MVT::i64, SrcReg, 0xffffffff);

     // Make the copy.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

     // Add register to return instruction.
     RetRegs.push_back(VA.getLocReg());
   }

-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(AArch64::RET_ReallyLR));
   for (unsigned RetReg : RetRegs)
     MIB.addReg(RetReg, RegState::Implicit);
@@ -3925,7 +3925,7 @@
     assert(ResultReg && "Unexpected AND instruction emission failure.");
   } else {
     ResultReg = createResultReg(&AArch64::GPR32RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(SrcReg);
   }
@@ -3949,7 +3949,7 @@
     // We're ZExt i1 to i64.  The ANDWri Wd, Ws, #1 implicitly clears the
     // upper 32 bits.  Emit a SUBREG_TO_REG to extend from Wd to Xd.
     Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), Reg64)
         .addImm(0)
         .addReg(ResultReg)
@@ -4046,7 +4046,7 @@
   if (Shift == 0) {
     if (RetVT == SrcVT) {
       Register ResultReg = createResultReg(RC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), ResultReg)
           .addReg(Op0);
       return ResultReg;
@@ -4093,7 +4093,7 @@
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     Register TmpReg = MRI.createVirtualRegister(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), TmpReg)
         .addImm(0)
         .addReg(Op0)
@@ -4149,7 +4149,7 @@
   if (Shift == 0) {
     if (RetVT == SrcVT) {
       Register ResultReg = createResultReg(RC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), ResultReg)
           .addReg(Op0);
       return ResultReg;
@@ -4209,7 +4209,7 @@
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     Register TmpReg = MRI.createVirtualRegister(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), TmpReg)
         .addImm(0)
         .addReg(Op0)
@@ -4265,7 +4265,7 @@
   if (Shift == 0) {
     if (RetVT == SrcVT) {
       Register ResultReg = createResultReg(RC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0);
       return ResultReg;
@@ -4314,7 +4314,7 @@
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
     Register TmpReg = MRI.createVirtualRegister(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), TmpReg)
         .addImm(0)
         .addReg(Op0)
@@ -4372,7 +4372,7 @@
     DestVT = MVT::i32;
   else if (DestVT == MVT::i64) {
     Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), Src64)
         .addImm(0)
         .addReg(SrcReg)
@@ -4469,7 +4469,7 @@
   if (IsZExt) {
     Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(AArch64::SUBREG_TO_REG), Reg64)
         .addImm(0)
         .addReg(Reg, getKillRegState(true))
@@ -4512,7 +4512,7 @@
   if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
     if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
       Register ResultReg = createResultReg(&AArch64::GPR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(AArch64::SUBREG_TO_REG), ResultReg)
           .addImm(0)
          .addReg(SrcReg)
@@ -5007,20 +5007,20 @@
   const Register ScratchReg = createResultReg(&AArch64::GPR32RegClass);

   // FIXME: MachineMemOperand doesn't support cmpxchg yet.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
       .addDef(ResultReg1)
       .addDef(ScratchReg)
       .addUse(AddrReg)
       .addUse(DesiredReg)
       .addUse(NewReg);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc))
       .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
       .addUse(ResultReg1)
       .addUse(DesiredReg)
       .addImm(0);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr))
       .addDef(ResultReg2)
       .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -303,12 +303,12 @@
   // for this instruction.
   Op0 = constrainOperandRegClass(II, Op0, 1);
   if (II.getNumDefs() >= 1) {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                             ResultReg).addReg(Op0));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                         .addReg(Op0));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(TargetOpcode::COPY), ResultReg)
                         .addReg(II.ImplicitDefs[0]));
   }
@@ -328,14 +328,14 @@
   if (II.getNumDefs() >= 1) {
     AddOptionalDefs(
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
             .addReg(Op0)
             .addReg(Op1));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                         .addReg(Op0)
                         .addReg(Op1));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(TargetOpcode::COPY), ResultReg)
                         .addReg(II.ImplicitDefs[0]));
   }
@@ -353,14 +353,14 @@
   Op0 = constrainOperandRegClass(II, Op0, 1);
   if (II.getNumDefs() >= 1) {
     AddOptionalDefs(
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
             .addReg(Op0)
             .addImm(Imm));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                         .addReg(Op0)
                         .addImm(Imm));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(TargetOpcode::COPY), ResultReg)
                         .addReg(II.ImplicitDefs[0]));
   }
@@ -374,12 +374,12 @@
   const MCInstrDesc &II = TII.get(MachineInstOpcode);

   if (II.getNumDefs() >= 1) {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                             ResultReg).addImm(Imm));
   } else {
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                         .addImm(Imm));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(TargetOpcode::COPY), ResultReg)
                         .addReg(II.ImplicitDefs[0]));
   }
@@ -392,7 +392,7 @@
   if (VT == MVT::f64) return 0;

   Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(ARM::VMOVSR), MoveReg)
                       .addReg(SrcReg));
   return MoveReg;
@@ -402,7 +402,7 @@
   if (VT == MVT::i64) return 0;

   Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(ARM::VMOVRS), MoveReg)
                       .addReg(SrcReg));
   return MoveReg;
@@ -428,7 +428,7 @@
       Opc = ARM::FCONSTS;
     }
     Register DestReg = createResultReg(TLI.getRegClassFor(VT));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(Opc), DestReg).addImm(Imm));
     return DestReg;
   }
@@ -444,7 +444,7 @@
   // The extra reg is for addrmode5.
   AddOptionalDefs(
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
   return DestReg;
@@ -462,7 +462,7 @@
       const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
       Register ImmReg = createResultReg(RC);
-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                               TII.get(Opc), ImmReg)
                           .addImm(CI->getZExtValue()));
       return ImmReg;
@@ -478,7 +478,7 @@
       const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
       Register ImmReg = createResultReg(RC);
-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                               TII.get(Opc), ImmReg)
                           .addImm(Imm));
       return ImmReg;
@@ -501,13 +501,13 @@
   unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);
   ResultReg = createResultReg(TLI.getRegClassFor(VT));
   if (isThumb2)
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(ARM::t2LDRpci), ResultReg)
                         .addConstantPoolIndex(Idx));
   else {
     // The extra immediate is for addrmode2.
     ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(ARM::LDRcp), ResultReg)
                         .addConstantPoolIndex(Idx)
                         .addImm(0));
@@ -551,7 +551,7 @@
       Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
     else
       Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
   } else {
     // MachineConstantPool wants an explicit alignment.
@@ -572,7 +572,7 @@
     MachineInstrBuilder MIB;
     if (isThumb2) {
       unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
                     DestReg).addConstantPoolIndex(Idx);
       if (IsPositionIndependent)
         MIB.addImm(Id);
@@ -580,7 +580,7 @@
     } else {
       // The extra immediate is for addrmode2.
       DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                     TII.get(ARM::LDRcp), DestReg)
                 .addConstantPoolIndex(Idx)
                 .addImm(0);
@@ -591,7 +591,7 @@
     Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));

     MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                      DbgLoc, TII.get(Opc), NewDestReg)
+                                      MIMD, TII.get(Opc), NewDestReg)
                                   .addReg(DestReg)
                                   .addImm(Id);
     AddOptionalDefs(MIB);
@@ -605,12 +605,12 @@
     MachineInstrBuilder MIB;
     Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
     if (isThumb2)
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                     TII.get(ARM::t2LDRi12), NewDestReg)
                 .addReg(DestReg)
                 .addImm(0);
     else
-      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                     TII.get(ARM::LDRi12), NewDestReg)
                 .addReg(DestReg)
                 .addImm(0);
@@ -658,7 +658,7 @@
     Register ResultReg = createResultReg(RC);
     ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(Opc), ResultReg)
                         .addFrameIndex(SI->second)
                         .addImm(0));
@@ -832,7 +832,7 @@
                                              : &ARM::GPRRegClass;
     Register ResultReg = createResultReg(RC);
     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(Opc), ResultReg)
                         .addFrameIndex(Addr.Base.FI)
                         .addImm(0));
@@ -985,7 +985,7 @@
   if (allocReg)
     ResultReg = createResultReg(RC);
   assert(ResultReg > 255 && "Expected an allocated virtual register.");
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(Opc), ResultReg);
   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
@@ -993,7 +993,7 @@
   // load. Now we must move from the GRP to the FP register.
   if (needVMOV) {
     Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(ARM::VMOVSR), MoveReg)
                         .addReg(ResultReg));
     ResultReg = MoveReg;
@@ -1049,7 +1049,7 @@
                                             : &ARM::GPRRegClass);
     unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
     SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(Opc), Res)
                         .addReg(SrcReg).addImm(1));
     SrcReg = Res;
@@ -1099,7 +1099,7 @@
       // Unaligned stores need special handling. Floats require word-alignment.
       if (Alignment && *Alignment < Align(4)) {
         Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
-        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                 TII.get(ARM::VMOVRS), MoveReg)
                             .addReg(SrcReg));
         SrcReg = MoveReg;
@@ -1125,7 +1125,7 @@
   // Create the base instruction, then add the operands.
   SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(StrOpc))
                                 .addReg(SrcReg);
   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
@@ -1250,7 +1250,7 @@
         return false;

       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
       finishCondBranch(BI->getParent(), TBB, FBB);
       return true;
@@ -1262,7 +1262,7 @@
       unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
       Register OpReg = getRegForValue(TI->getOperand(0));
       OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                               TII.get(TstOpc))
                           .addReg(OpReg).addImm(1));
@@ -1273,7 +1273,7 @@
       }

       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

       finishCondBranch(BI->getParent(), TBB, FBB);
@@ -1283,7 +1283,7 @@
              dyn_cast<ConstantInt>(BI->getCondition())) {
     uint64_t Imm = CI->getZExtValue();
     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
-    fastEmitBranch(Target, DbgLoc);
+    fastEmitBranch(Target, MIMD.getDL());
     return true;
   }

@@ -1300,7 +1300,7 @@
   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
   CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
   AddOptionalDefs(
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));

@@ -1311,7 +1311,7 @@
   }

   unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
   finishCondBranch(BI->getParent(), TBB, FBB);
   return true;
@@ -1324,7 +1324,7 @@
   unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
   assert(isThumb2 || Subtarget->hasV4TOps());

-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(Opc)).addReg(AddrReg));

   const IndirectBrInst *IB = cast<IndirectBrInst>(I);
@@ -1432,11 +1432,11 @@
   SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
   if (!UseImm) {
     SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                         .addReg(SrcReg1).addReg(SrcReg2));
   } else {
     MachineInstrBuilder MIB;
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
               .addReg(SrcReg1);

     // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
@@ -1448,7 +1448,7 @@
   // For floating point we need to move the result to a comparison register
   // that we can then use for branches.
   if (Ty->isFloatTy() || Ty->isDoubleTy())
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(ARM::FMSTAT)));
   return true;
 }
@@ -1475,7 +1475,7 @@
   Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
   unsigned ZeroReg = fastMaterializeConstant(Zero);
   // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), DestReg)
       .addReg(ZeroReg).addImm(1)
       .addImm(ARMPred).addReg(ARM::CPSR);

@@ -1495,7 +1495,7 @@
   if (Op == 0) return false;

   Register Result = createResultReg(&ARM::DPRRegClass);
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(ARM::VCVTDS), Result)
                       .addReg(Op));
   updateValueMap(I, Result);
@@ -1514,7 +1514,7 @@
   if (Op == 0) return false;

   Register Result = createResultReg(&ARM::SPRRegClass);
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(ARM::VCVTSD), Result)
                       .addReg(Op));
   updateValueMap(I, Result);
@@ -1560,7 +1560,7 @@
   else return false;

   Register ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(Opc), ResultReg).addReg(FP));
   updateValueMap(I, ResultReg);
   return true;
@@ -1587,7 +1587,7 @@
   // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
   Register ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(Opc), ResultReg).addReg(Op));

   // This result needs to be in an integer register, but the conversion only
@@ -1636,7 +1636,7 @@
   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
   CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
   AddOptionalDefs(
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
          .addReg(CondReg)
          .addImm(1));
@@ -1656,7 +1656,7 @@
   if (!UseImm) {
     Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
     Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
             ResultReg)
         .addReg(Op2Reg)
         .addReg(Op1Reg)
@@ -1664,7 +1664,7 @@
         .addReg(ARM::CPSR);
   } else {
     Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
             ResultReg)
         .addReg(Op1Reg)
         .addImm(Imm)
@@ -1766,7 +1766,7 @@
   Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
   SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
   SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(Opc), ResultReg)
                       .addReg(SrcReg1).addReg(SrcReg2));
   updateValueMap(I, ResultReg);
@@ -1813,7 +1813,7 @@
   if (Op2 == 0) return false;

   Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(Opc), ResultReg)
                       .addReg(Op1).addReg(Op2));
   updateValueMap(I, ResultReg);
@@ -1932,7 +1932,7 @@
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(AdjStackDown))
                       .addImm(NumBytes).addImm(0));

@@ -1977,7 +1977,7 @@
     // Now copy/store arg to correct locations.
     if (VA.isRegLoc() && !VA.needsCustom()) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
       RegArgs.push_back(VA.getLocReg());
     } else if (VA.needsCustom()) {
@@ -1991,7 +1991,7 @@
       assert(VA.isRegLoc() && NextVA.isRegLoc() &&
              "We only handle register args!");

-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                               TII.get(ARM::VMOVRRD), VA.getLocReg())
                           .addReg(NextVA.getLocReg(), RegState::Define)
                           .addReg(Arg));
@@ -2023,7 +2023,7 @@
                              unsigned &NumBytes, bool isVarArg) {
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                           TII.get(AdjStackUp))
                       .addImm(NumBytes).addImm(-1ULL));

@@ -2040,7 +2040,7 @@
       MVT DestVT = RVLocs[0].getValVT();
       const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
       Register ResultReg = createResultReg(DstRC);
-      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                               TII.get(ARM::VMOVDRR), ResultReg)
                           .addReg(RVLocs[0].getLocReg())
                           .addReg(RVLocs[1].getLocReg()));
@@ -2061,7 +2061,7 @@
       const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

       Register ResultReg = createResultReg(DstRC);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY),
               ResultReg).addReg(RVLocs[0].getLocReg());
       UsedRegs.push_back(RVLocs[0].getLocReg());
@@ -2147,7 +2147,7 @@
       // Avoid a cross-class copy. This is very unlikely.
       if (!SrcRC->contains(DstReg))
         return false;
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

       // Add register to return instruction.
@@ -2163,7 +2163,7 @@
   else
     RetOpc = Subtarget->getReturnOpcode();

-  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                     TII.get(RetOpc));
   AddOptionalDefs(MIB);
   for (unsigned R : RetRegs)
@@ -2261,7 +2261,7 @@
   // Issue the call.
   unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                    DbgLoc, TII.get(CallOpc));
+                                    MIMD, TII.get(CallOpc));
   // BL / BLX don't take a predicate, but tBL / tBLX do.
   if (isThumb2)
     MIB.add(predOps(ARMCC::AL));
@@ -2402,7 +2402,7 @@
   // Issue the call.
   unsigned CallOpc = ARMSelectCallOp(UseReg);
   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                    DbgLoc, TII.get(CallOpc));
+                                    MIMD, TII.get(CallOpc));

   // ARM calls don't take a predicate, but tBL / tBLX do.
   if(isThumb2)
@@ -2508,7 +2508,7 @@
   unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
   while (Depth--) {
     DestReg = createResultReg(RC);
-    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                             TII.get(LdrOpc), DestReg)
                         .addReg(SrcReg).addImm(0));
     SrcReg = DestReg;
@@ -2571,7 +2571,7 @@
     Opcode = ARM::tTRAP;
   else
     Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode));
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode));
   return true;
 }
@@ -2723,7 +2723,7 @@
     unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; bool isKill = 1 == Instr; MachineInstrBuilder MIB = BuildMI( - *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg); + *FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode), ResultReg); if (setsCPSR) MIB.addReg(ARM::CPSR, RegState::Define); SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); @@ -2803,7 +2803,7 @@ Register ResultReg = createResultReg(&ARM::GPRnopcRegClass); if(ResultReg == 0) return false; - MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(Reg1); @@ -2970,7 +2970,7 @@ Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass); unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp; MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), TempReg) .addConstantPoolIndex(Idx) .addMemOperand(CPMMO); if (Opc == ARM::LDRcp) @@ -2982,7 +2982,7 @@ Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR : ARM::PICADD; DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0); - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(TempReg) .addImm(ARMPCLabelIndex); @@ -2991,7 +2991,7 @@ if (UseGOT_PREL && Subtarget->isThumb()) { Register NewDestReg = createResultReg(TLI.getRegClassFor(VT)); - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(ARM::t2LDRi12), NewDestReg) .addReg(DestReg) .addImm(0); @@ -3065,7 +3065,7 @@ // Without this, EmitLiveInCopies may eliminate the livein if its only // use is a bitcast (which isn't turned into an instruction). 
Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(DstReg, getKillRegState(true)); updateValueMap(&Arg, ResultReg); diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp --- a/llvm/lib/Target/Mips/MipsFastISel.cpp +++ b/llvm/lib/Target/Mips/MipsFastISel.cpp @@ -204,11 +204,11 @@ unsigned materializeExternalCallSym(MCSymbol *Syn); MachineInstrBuilder emitInst(unsigned Opc) { - return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); } MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) { - return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DstReg); } @@ -338,7 +338,7 @@ if (SI != FuncInfo.StaticAllocaMap.end()) { Register ResultReg = createResultReg(&Mips::GPR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::LEA_ADDiu), ResultReg) .addFrameIndex(SI->second) .addImm(0); @@ -794,7 +794,7 @@ MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align(4)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addFrameIndex(FI) .addImm(Offset) .addMemOperand(MMO); @@ -843,7 +843,7 @@ MachineMemOperand *MMO = MF->getMachineMemOperand( MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align(4)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg) .addFrameIndex(FI) .addImm(Offset) @@ -967,7 +967,7 @@ return false; } - BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ)) + BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::BGTZ)) .addReg(ZExtCondReg) .addMBB(TBB); finishCondBranch(BI->getParent(), TBB, FBB); @@ -1221,7 +1221,7 @@ // Now copy/store arg to correct locations. if (VA.isRegLoc() && !VA.needsCustom()) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg); CLI.OutRegs.push_back(VA.getLocReg()); } else if (VA.needsCustom()) { @@ -1291,7 +1291,7 @@ Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT)); if (!ResultReg) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(RVLocs[0].getLocReg()); CLI.InRegs.push_back(RVLocs[0].getLocReg()); @@ -1461,7 +1461,7 @@ // Without this, EmitLiveInCopies may eliminate the livein if its only // use is a bitcast (which isn't turned into an instruction). 
Register ResultReg = createResultReg(Allocation[ArgNo].RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg) .addReg(DstReg, getKillRegState(true)); updateValueMap(&FormalArg, ResultReg); @@ -1550,7 +1550,7 @@ DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32); emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress); MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::JALR), Mips::RA).addReg(Mips::T9); // Add implicit physical register uses to the call. @@ -1756,7 +1756,7 @@ } // Make the copy. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg); // Add register to return instruction. @@ -2127,7 +2127,7 @@ const MCInstrDesc &II = TII.get(MachineInstOpcode); Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg) .addReg(Op0) .addReg(Op1) .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead) diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp --- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp +++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp @@ -151,7 +151,7 @@ unsigned SrcReg, unsigned Flag = 0, unsigned SubReg = 0) { Register TmpReg = createResultReg(ToRC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg, Flag, SubReg); return TmpReg; } @@ -429,7 +429,7 @@ // register and continue. This should almost never happen. if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) { Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDI8), ResultReg).addFrameIndex(Addr.Base.FI).addImm(0); Addr.Base.Reg = ResultReg; Addr.BaseType = Address::RegBase; @@ -533,7 +533,7 @@ MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI), MFI.getObjectAlign(Addr.Base.FI)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO); // Base reg with offset in range. @@ -541,7 +541,7 @@ // VSX only provides an indexed load. if (Is32VSXLoad || Is64VSXLoad) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addImm(Addr.Offset).addReg(Addr.Base.Reg); // Indexed form. 
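The hunks above and below are one mechanical substitution: every BuildMI() call that used to take the raw DbgLoc field now takes the new MIMD field, while the few call sites that need only a bare source location (fastEmitBranch() and the isValidLocationForIntrinsic() asserts) unwrap it with MIMD.getDL(). The substitution compiles as a one-word rename at each call site because MIMetadata is implicitly constructible from a DebugLoc and simply pairs it with a !pcsections node. A minimal sketch of the helper, paraphrased from llvm/CodeGen/MachineInstrBuilder.h (the member and accessor names follow the header, but this is a sketch, not the verbatim class):

    #include <utility>
    #include "llvm/IR/DebugLoc.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"

    namespace llvm {
    // Sketch of the MIMetadata convenience bundle consumed by BuildMI().
    class MIMetadata {
      DebugLoc DL;
      MDNode *PCSections = nullptr;

    public:
      MIMetadata() = default;
      // Implicit from a DebugLoc: existing BuildMI(..., DL, ...) callers
      // keep compiling, they just carry a null !pcsections slot.
      MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr)
          : DL(std::move(DL)), PCSections(PCSections) {}
      // From an IR instruction: captures its source location and its
      // !pcsections node in one shot, which is what FastISel wants for
      // each instruction it selects.
      explicit MIMetadata(const Instruction &From)
          : DL(From.getDebugLoc()),
            PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {}

      const DebugLoc &getDL() const { return DL; }
      MDNode *getPCSections() const { return PCSections; }
    };
    } // namespace llvm

The BuildMI() overloads that accept a const MIMetadata & attach both pieces to the MachineInstr they create, which is why the textual change in every target below can stay a pure rename.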
@@ -568,7 +568,7 @@ case PPC::SPELWZ: Opc = PPC::SPELWZX; break; } - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); // If we have an index register defined we use it in the store inst, @@ -679,7 +679,7 @@ MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI), MFI.getObjectAlign(Addr.Base.FI)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg) .addImm(Addr.Offset) .addFrameIndex(Addr.Base.FI) @@ -691,7 +691,7 @@ if (Is32VSXStore || Is64VSXStore) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg); // Indexed form. @@ -714,7 +714,7 @@ case PPC::SPESTW: Opc = PPC::SPESTWX; break; } - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addReg(SrcReg); // If we have an index register defined we use it in the store inst, @@ -788,7 +788,7 @@ CondReg, PPCPred)) return false; - BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC)) + BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BCC)) .addImm(Subtarget->hasSPE() ? PPC::PRED_SPE : PPCPred) .addReg(CondReg) .addMBB(TBB); @@ -799,7 +799,7 @@ dyn_cast(BI->getCondition())) { uint64_t Imm = CI->getZExtValue(); MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; - fastEmitBranch(Target, DbgLoc); + fastEmitBranch(Target, MIMD.getDL()); return true; } @@ -942,10 +942,10 @@ } if (!UseImm) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc), DestReg) .addReg(SrcReg1).addReg(SrcReg2); else - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc), DestReg) .addReg(SrcReg1).addImm(Imm); return true; @@ -987,18 +987,18 @@ auto RC = MRI.getRegClass(SrcReg); if (Subtarget->hasSPE()) { DestReg = createResultReg(&PPC::GPRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::EFSCFD), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::EFSCFD), DestReg) .addReg(SrcReg); } else if (Subtarget->hasP8Vector() && isVSFRCRegClass(RC)) { DestReg = createResultReg(&PPC::VSSRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::XSRSP), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::XSRSP), DestReg) .addReg(SrcReg); } else { SrcReg = copyRegToRegClass(&PPC::F8RCRegClass, SrcReg); DestReg = createResultReg(&PPC::F4RCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::FRSP), DestReg) .addReg(SrcReg); } @@ -1093,7 +1093,7 @@ Register DestReg = createResultReg(&PPC::SPERCRegClass); // Generate the convert. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(SrcReg); updateValueMap(I, DestReg); return true; @@ -1137,7 +1137,7 @@ Opc = IsSigned ? PPC::FCFID : PPC::FCFIDU; // Generate the convert. 
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(FPReg); updateValueMap(I, DestReg); @@ -1248,7 +1248,7 @@ } // Generate the convert. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(SrcReg); // Now move the integer value from a float register to an integer register. @@ -1344,7 +1344,7 @@ } if (UseImm) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(SrcReg1) .addImm(Imm); @@ -1362,7 +1362,7 @@ if (ISDOpcode == ISD::SUB) std::swap(SrcReg1, SrcReg2); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(SrcReg1).addReg(SrcReg2); updateValueMap(I, ResultReg); return true; @@ -1415,7 +1415,7 @@ NumBytes = std::max(NumBytes, LinkageSize + 64); // Issue CALLSEQ_START. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TII.getCallFrameSetupOpcode())) .addImm(NumBytes).addImm(0); @@ -1476,7 +1476,7 @@ } else ArgReg = NextGPR++; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ArgReg).addReg(Arg); RegArgs.push_back(ArgReg); } @@ -1490,7 +1490,7 @@ CallingConv::ID CC = CLI.CallConv; // Issue CallSEQ_END. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TII.getCallFrameDestroyOpcode())) .addImm(NumBytes).addImm(0); @@ -1523,7 +1523,7 @@ // If necessary, round the floating result to single precision. } else if (CopyVT == MVT::f64) { ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::FRSP), ResultReg).addReg(SourcePhysReg); // If only the low half of a general register is needed, generate @@ -1657,13 +1657,13 @@ // the call we generate here will be erased by FastISel::selectPatchpoint, // so don't try very hard... if (CLI.IsPatchPoint) - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::NOP)); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::NOP)); else return false; } else { // Build direct call with NOP for TOC restore. // FIXME: We can and should optimize away the NOP for local calls. - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BL8_NOP)); // Add callee. 
MIB.addGlobalAddress(GV); @@ -1728,7 +1728,7 @@ unsigned SrcReg = PPCMaterializeInt(CI, MVT::i64, VA.getLocInfo() != CCValAssign::ZExt); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg); RetRegs.push_back(RetReg); @@ -1785,14 +1785,14 @@ } } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), RetRegs[i]) .addReg(SrcReg); } } } - MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BLR8)); for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) @@ -1822,7 +1822,7 @@ assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??"); Opc = PPC::EXTSW_32_64; } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addReg(SrcReg); // Unsigned 32-bit extensions use RLWINM. @@ -1834,7 +1834,7 @@ assert(SrcVT == MVT::i16 && "Unsigned extend from i32 to i32??"); MB = 16; } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLWINM), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::RLWINM), DestReg) .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB).addImm(/*ME=*/31); @@ -1847,7 +1847,7 @@ MB = 48; else MB = 32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::RLDICL_32_64), DestReg) .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB); } @@ -1861,9 +1861,9 @@ if (AddrReg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::MTCTR8)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::MTCTR8)) .addReg(AddrReg); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCTR8)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::BCTR8)); const IndirectBrInst *IB = cast(I); for (const BasicBlock *SuccBB : IB->successors()) @@ -2022,26 +2022,26 @@ PPCFuncInfo->setUsesTOCBasePtr(); // For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)). if (CModel == CodeModel::Small) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocCPT), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtocCPT), TmpReg) .addConstantPoolIndex(Idx).addReg(PPC::X2); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addImm(0).addReg(TmpReg).addMemOperand(MMO); } else { // Otherwise we generate LF[SD](Idx[lo], ADDIStocHA8(X2, Idx)). - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDIStocHA8), TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx); // But for large code model, we must generate a LDtocL followed // by the LF[SD]. 
if (CModel == CodeModel::Large) { Register TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtocL), TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addImm(0) .addReg(TmpReg2); } else - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg) .addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO) .addReg(TmpReg) .addMemOperand(MMO); @@ -2083,7 +2083,7 @@ PPCFuncInfo->setUsesTOCBasePtr(); // For small code model, generate a simple TOC load. if (CModel == CodeModel::Small) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtoc), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtoc), DestReg) .addGlobalAddress(GV) .addReg(PPC::X2); @@ -2097,15 +2097,15 @@ // ADDItocL(ADDIStocHA8(%x2, GV), GV) // Either way, start with the ADDIStocHA8: Register HighPartReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDIStocHA8), HighPartReg).addReg(PPC::X2).addGlobalAddress(GV); if (Subtarget->isGVIndirectSymbol(GV)) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtocL), DestReg).addGlobalAddress(GV).addReg(HighPartReg); } else { // Otherwise generate the ADDItocL. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDItocL), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDItocL), DestReg).addReg(HighPartReg).addGlobalAddress(GV); } } @@ -2124,21 +2124,21 @@ bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass); if (isInt<16>(Imm)) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg) .addImm(Imm); else if (Lo) { // Both Lo and Hi have nonzero bits. Register TmpReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg) .addImm(Hi); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg) .addReg(TmpReg).addImm(Lo); } else // Just Hi bits. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(IsGPRC ? 
PPC::LIS : PPC::LIS8), ResultReg) .addImm(Hi); @@ -2178,7 +2178,7 @@ unsigned TmpReg2; if (Imm) { TmpReg2 = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLDICR), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::RLDICR), TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift); } else TmpReg2 = TmpReg1; @@ -2186,14 +2186,14 @@ unsigned TmpReg3, Hi, Lo; if ((Hi = (Remainder >> 16) & 0xFFFF)) { TmpReg3 = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORIS8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ORIS8), TmpReg3).addReg(TmpReg2).addImm(Hi); } else TmpReg3 = TmpReg2; if ((Lo = Remainder & 0xFFFF)) { Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORI8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ORI8), ResultReg).addReg(TmpReg3).addImm(Lo); return ResultReg; } @@ -2209,7 +2209,7 @@ // case first. if (VT == MVT::i1 && Subtarget->useCRBits()) { Register ImmReg = createResultReg(&PPC::CRBITRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CI->isZero() ? PPC::CRUNSET : PPC::CRSET), ImmReg); return ImmReg; } @@ -2229,7 +2229,7 @@ if (isInt<16>(Imm)) { unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI; Register ImmReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ImmReg) .addImm(Imm); return ImmReg; } @@ -2281,7 +2281,7 @@ if (SI != FuncInfo.StaticAllocaMap.end()) { Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::ADDI8), ResultReg).addFrameIndex(SI->second).addImm(0); return ResultReg; } @@ -2391,7 +2391,7 @@ // case first. if (VT == MVT::i1 && Subtarget->useCRBits()) { Register ImmReg = createResultReg(&PPC::CRBITRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Imm == 0 ? PPC::CRUNSET : PPC::CRSET), ImmReg); return ImmReg; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -388,7 +388,7 @@ : &WebAssembly::I32RegClass); unsigned Opc = Subtarget->hasAddr64() ? 
WebAssembly::CONST_I64 : WebAssembly::CONST_I32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), Reg) .addImm(0); Addr.setReg(Reg); } @@ -460,12 +460,12 @@ } Register Imm = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::CONST_I32), Imm) .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits())); Register Result = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::AND_I32), Result) .addReg(Reg) .addReg(Imm); @@ -490,18 +490,18 @@ } Register Imm = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::CONST_I32), Imm) .addImm(32 - MVT(From).getSizeInBits()); Register Left = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::SHL_I32), Left) .addReg(Reg) .addReg(Imm); Register Right = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::SHR_S_I32), Right) .addReg(Left) .addReg(Imm); @@ -519,7 +519,7 @@ Reg = zeroExtendToI32(Reg, V, From); Register Result = createResultReg(&WebAssembly::I64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::I64_EXTEND_U_I32), Result) .addReg(Reg); return Result; @@ -541,7 +541,7 @@ Reg = signExtendToI32(Reg, V, From); Register Result = createResultReg(&WebAssembly::I64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::I64_EXTEND_S_I32), Result) .addReg(Reg); return Result; @@ -580,7 +580,7 @@ assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass); Register NotReg = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::EQZ_I32), NotReg) .addReg(Reg); return NotReg; @@ -588,7 +588,7 @@ unsigned WebAssemblyFastISel::copyValue(unsigned Reg) { Register ResultReg = createResultReg(MRI.getRegClass(Reg)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(WebAssembly::COPY), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::COPY), ResultReg) .addReg(Reg); return ResultReg; @@ -604,7 +604,7 @@ : &WebAssembly::I32RegClass); unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::COPY_I64 : WebAssembly::COPY_I32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addFrameIndex(SI->second); return ResultReg; } @@ -623,7 +623,7 @@ : &WebAssembly::I32RegClass); unsigned Opc = Subtarget->hasAddr64() ? 
WebAssembly::CONST_I64 : WebAssembly::CONST_I32; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addGlobalAddress(GV); return ResultReg; } @@ -717,7 +717,7 @@ return false; } Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addImm(I); updateValueMap(&Arg, ResultReg); @@ -859,7 +859,7 @@ return false; } - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); if (!IsVoid) MIB.addReg(ResultReg, RegState::Define); @@ -886,7 +886,7 @@ // as 64-bit for uniformity with other pointer types. // See also: WebAssemblyISelLowering.cpp: LowerCallResults if (Subtarget->hasAddr64()) { - auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), DbgLoc, + auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), MIMD, TII.get(WebAssembly::I32_WRAP_I64)); Register Reg32 = createResultReg(&WebAssembly::I32RegClass); Wrap.addReg(Reg32, RegState::Define); @@ -961,7 +961,7 @@ } Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(TrueReg) .addReg(FalseReg) .addReg(CondReg); @@ -979,7 +979,7 @@ if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) { Register Result = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::I32_WRAP_I64), Result) .addReg(Reg); Reg = Result; @@ -1077,7 +1077,7 @@ return false; Register ResultReg = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(LHS) .addReg(RHS); updateValueMap(ICmp, ResultReg); @@ -1138,7 +1138,7 @@ } Register ResultReg = createResultReg(&WebAssembly::I32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(LHS) .addReg(RHS); @@ -1231,7 +1231,7 @@ materializeLoadStoreOperands(Addr); Register ResultReg = createResultReg(RC); - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load)); @@ -1291,7 +1291,7 @@ if (VTIsi1) ValueReg = maskI1Value(ValueReg, Store->getValueOperand()); - auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store)); @@ -1319,7 +1319,7 @@ if (Not) Opc = WebAssembly::BR_UNLESS; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)) .addMBB(TBB) .addReg(CondReg); @@ -1334,7 +1334,7 @@ const auto *Ret = cast(I); if (Ret->getNumOperands() == 0) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::RETURN)); return true; } @@ -1379,14 +1379,14 @@ if (Reg == 0) return false; - BuildMI(*FuncInfo.MBB, 
FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::RETURN)) .addReg(Reg); return true; } bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::UNREACHABLE)); return true; } diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp --- a/llvm/lib/Target/X86/X86FastISel.cpp +++ b/llvm/lib/Target/X86/X86FastISel.cpp @@ -467,7 +467,7 @@ ResultReg = createResultReg(RC); MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); addFullAddress(MIB, AM); if (MMO) MIB->addMemOperand(*FuncInfo.MF, MMO); @@ -496,7 +496,7 @@ case MVT::i1: { // Mask out all but lowest bit. Register AndResult = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::AND8ri), AndResult) .addReg(ValReg).addImm(1); ValReg = AndResult; @@ -643,7 +643,7 @@ // any bugs before. ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1); MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, Desc); addFullAddress(MIB, AM).addReg(ValReg); if (MMO) MIB->addMemOperand(*FuncInfo.MF, MMO); @@ -679,7 +679,7 @@ if (Opc) { MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)); addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue() : CI->getZExtValue()); if (MMO) @@ -786,7 +786,7 @@ LoadReg = createResultReg(RC); MachineInstrBuilder LoadMI = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), LoadReg); addFullAddress(LoadMI, StubAM); // Ok, back to normal mode. @@ -1085,12 +1085,12 @@ // In 64-bit mode, we need a 64-bit register even if pointers are 32 bits. if (Reg && Subtarget->isTarget64BitILP32()) { Register CopyReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32rr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32rr), CopyReg) .addReg(Reg); Register ExtReg = createResultReg(&X86::GR64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::SUBREG_TO_REG), ExtReg) .addImm(0) .addReg(CopyReg) @@ -1267,7 +1267,7 @@ // Avoid a cross-class copy. This is very unlikely. if (!SrcRC->contains(DstReg)) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); // Add register to return instruction. @@ -1287,7 +1287,7 @@ assert(Reg && "SRetReturnReg should have been set in LowerFormalArguments()!"); unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), RetReg).addReg(Reg); RetRegs.push_back(RetReg); } @@ -1295,11 +1295,11 @@ // Now emit the RET. 
MachineInstrBuilder MIB; if (X86MFInfo->getBytesToPopOnReturn()) { - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32)) .addImm(X86MFInfo->getBytesToPopOnReturn()); } else { - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32)); } for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) @@ -1405,7 +1405,7 @@ } bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT, - const DebugLoc &CurDbgLoc) { + const DebugLoc &CurMIMD) { Register Op0Reg = getRegForValue(Op0); if (Op0Reg == 0) return false; @@ -1418,7 +1418,7 @@ // CMPri, otherwise use CMPrr. if (const ConstantInt *Op1C = dyn_cast(Op1)) { if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareImmOpc)) .addReg(Op0Reg) .addImm(Op1C->getSExtValue()); return true; @@ -1430,7 +1430,7 @@ Register Op1Reg = getRegForValue(Op1); if (Op1Reg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareOpc)) .addReg(Op0Reg) .addReg(Op1Reg); @@ -1455,7 +1455,7 @@ default: break; case CmpInst::FCMP_FALSE: { ResultReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0), ResultReg); ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit); if (!ResultReg) @@ -1464,7 +1464,7 @@ } case CmpInst::FCMP_TRUE: { ResultReg = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri), ResultReg).addImm(1); break; } @@ -1506,11 +1506,11 @@ Register FlagReg1 = createResultReg(&X86::GR8RegClass); Register FlagReg2 = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(SETFOpc[2]), ResultReg).addReg(FlagReg1).addReg(FlagReg2); updateValueMap(I, ResultReg); return true; @@ -1528,7 +1528,7 @@ if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc())) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), ResultReg).addImm(CC); updateValueMap(I, ResultReg); return true; @@ -1566,18 +1566,18 @@ } Register Result32 = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovInst), Result32) .addReg(ResultReg); ResultReg = createResultReg(&X86::GR64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, 
TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg) .addImm(0).addReg(Result32).addImm(X86::sub_32bit); } else if (DstVT == MVT::i16) { // i8->i16 doesn't exist in the autogenerated isel table. Need to zero // extend to 32-bits and then extract down to 16-bits. Register Result32 = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVZX32rr8), Result32).addReg(ResultReg); ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); @@ -1611,7 +1611,7 @@ // Negate the result to make an 8-bit sign extended value. ResultReg = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::NEG8r), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::NEG8r), ResultReg).addReg(ZExtReg); SrcVT = MVT::i8; @@ -1621,7 +1621,7 @@ // i8->i16 doesn't exist in the autogenerated isel table. Need to sign // extend to 32-bits and then extract down to 16-bits. Register Result32 = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVSX32rr8), Result32).addReg(ResultReg); ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); @@ -1655,8 +1655,8 @@ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); switch (Predicate) { default: break; - case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true; - case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true; + case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, MIMD.getDL()); return true; + case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, MIMD.getDL()); return true; } const Value *CmpLHS = CI->getOperand(0); @@ -1706,13 +1706,13 @@ if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc())) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(CC); // X86 requires a second branch to handle UNE (and OEQ, which is mapped // to UNE above). 
if (NeedExtraBranch) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(X86::COND_P); } @@ -1737,7 +1737,7 @@ Register OpReg = getRegForValue(TI->getOperand(0)); if (OpReg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TestOpc)) .addReg(OpReg).addImm(1); unsigned JmpCond = X86::COND_NE; @@ -1746,7 +1746,7 @@ JmpCond = X86::COND_E; } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(JmpCond); finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); @@ -1760,7 +1760,7 @@ if (TmpReg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(CC); finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); return true; @@ -1776,15 +1776,15 @@ if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) { unsigned KOpReg = OpReg; OpReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), OpReg) .addReg(KOpReg); OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit); } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri)) .addReg(OpReg) .addImm(1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) .addMBB(TrueMBB).addImm(X86::COND_NE); finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); return true; @@ -1842,18 +1842,18 @@ Register Op1Reg = getRegForValue(I->getOperand(1)); if (Op1Reg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), CReg).addReg(Op1Reg); // The shift instruction uses X86::CL. If we defined a super-register // of X86::CL, emit a subreg KILL to precisely describe what we're doing here. if (CReg != X86::CL) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::KILL), X86::CL) .addReg(CReg, RegState::Kill); Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpReg), ResultReg) .addReg(Op0Reg); updateValueMap(I, ResultReg); return true; @@ -1954,38 +1954,38 @@ return false; // Move op0 into low-order input register. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg); // Zero-extend or sign-extend into high-order input register. if (OpEntry.OpSignExtend) { if (OpEntry.IsOpSigned) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpEntry.OpSignExtend)); else { Register Zero32 = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0), Zero32); // Copy the zero into the appropriate sub/super/identical physical // register. 
Unfortunately the operations needed are not uniform enough // to fit neatly into the table above. if (VT == MVT::i16) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), TypeEntry.HighInReg) .addReg(Zero32, 0, X86::sub_16bit); } else if (VT == MVT::i32) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), TypeEntry.HighInReg) .addReg(Zero32); } else if (VT == MVT::i64) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg) .addImm(0).addReg(Zero32).addImm(X86::sub_32bit); } } } // Generate the DIV/IDIV instruction. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpEntry.OpDivRem)).addReg(Op1Reg); // For i8 remainder, we can't reference ah directly, as we'll end // up with bogus copies like %r9b = COPY %ah. Reference ax @@ -2001,11 +2001,11 @@ OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) { Register SourceSuperReg = createResultReg(&X86::GR16RegClass); Register ResultSuperReg = createResultReg(&X86::GR16RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), SourceSuperReg).addReg(X86::AX); // Shift AX right by 8 bits instead of using AH. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SHR16ri), ResultSuperReg).addReg(SourceSuperReg).addImm(8); // Now reference the 8-bit subreg of the result. @@ -2015,7 +2015,7 @@ // Copy the result out of the physreg if we haven't already. if (!ResultReg) { ResultReg = createResultReg(TypeEntry.RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), ResultReg) .addReg(OpEntry.DivRemResultReg); } updateValueMap(I, ResultReg); @@ -2081,17 +2081,17 @@ if (SETFOpc) { Register FlagReg1 = createResultReg(&X86::GR8RegClass); Register FlagReg2 = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]); auto const &II = TII.get(SETFOpc[2]); if (II.getNumDefs()) { Register TmpReg = createResultReg(&X86::GR8RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, TmpReg) .addReg(FlagReg2).addReg(FlagReg1); } else { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II) .addReg(FlagReg2).addReg(FlagReg1); } } @@ -2120,12 +2120,12 @@ if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { unsigned KCondReg = CondReg; CondReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), CondReg) .addReg(KCondReg); CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit); } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri)) .addReg(CondReg) .addImm(1); 
} @@ -2211,7 +2211,7 @@ // Need an IMPLICIT_DEF for the input that is used to generate the upper // bits of the result register since its not based on any of the inputs. Register ImplicitDefReg = createResultReg(VR128X); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); // Place RHSReg is the passthru of the masked movss/sd operation and put @@ -2222,7 +2222,7 @@ ImplicitDefReg, LHSReg); ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg); } else if (Subtarget->hasAVX()) { @@ -2243,7 +2243,7 @@ Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg, CmpReg); ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg); } else { // Choose the SSE instruction sequence based on data type (float or double). @@ -2265,7 +2265,7 @@ Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg); Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg); ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg); } updateValueMap(I, ResultReg); @@ -2320,12 +2320,12 @@ if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { unsigned KCondReg = CondReg; CondReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), CondReg) .addReg(KCondReg); CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit); } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri)) .addReg(CondReg) .addImm(1); } @@ -2367,7 +2367,7 @@ return false; const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg) .addReg(OpReg); updateValueMap(I, ResultReg); @@ -2437,7 +2437,7 @@ MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT(); const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT); Register ImplicitDefReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg); updateValueMap(I, ResultReg); @@ -2468,14 +2468,14 @@ unsigned ImplicitDefReg; if (HasAVX) { ImplicitDefReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); } Register ResultReg = createResultReg(RC); MachineInstrBuilder MIB; - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc), + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpc), ResultReg); if (HasAVX) @@ -2627,7 +2627,7 @@ Opc = Subtarget->hasAVX512() ? 
X86::VMOVPDI2DIZrr : X86::VMOVPDI2DIrr; ResultReg = createResultReg(&X86::GR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(InputReg, RegState::Kill); // The result value is in the lower 16-bits of ResultReg. @@ -2649,7 +2649,7 @@ // The result value is in the lower 32-bits of ResultReg. // Emit an explicit copy from register class VR128 to register class FR32. ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg) .addReg(InputReg, RegState::Kill); } @@ -2692,7 +2692,7 @@ // never directly reference the frame register (the TwoAddressInstruction- // Pass doesn't like that). Register SrcReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg); // Now recursively load from the frame address. @@ -2703,7 +2703,7 @@ unsigned Depth = cast(II->getOperand(0))->getZExtValue(); while (Depth--) { Register DestReg = createResultReg(RC); - addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg), SrcReg); SrcReg = DestReg; } @@ -2777,16 +2777,16 @@ if (!X86SelectAddress(DI->getAddress(), AM)) return false; const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); - assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) && + assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) && "Expected inlined-at fields to agree"); - addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM) + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II), AM) .addImm(0) .addMetadata(DI->getVariable()) .addMetadata(DI->getExpression()); return true; } case Intrinsic::trap: { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TRAP)); return true; } case Intrinsic::sqrt: { @@ -2827,13 +2827,13 @@ unsigned ImplicitDefReg = 0; if (AVXLevel > 0) { ImplicitDefReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); } Register ResultReg = createResultReg(RC); MachineInstrBuilder MIB; - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); if (ImplicitDefReg) @@ -2907,7 +2907,7 @@ // We can use INC/DEC. ResultReg = createResultReg(TLI.getRegClassFor(VT)); bool IsDec = BaseOpc == ISD::SUB; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg) .addReg(LHSReg); } else @@ -2930,7 +2930,7 @@ static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX }; // First copy the first operand into RAX, which is an implicit input to // the X86::MUL*r instruction. 
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8]) .addReg(LHSReg); ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8], @@ -2941,7 +2941,7 @@ if (VT == MVT::i8) { // Copy the first operand into AL, which is an implicit input to the // X86::IMUL8r instruction. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), X86::AL) .addReg(LHSReg); ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg); @@ -2956,7 +2956,7 @@ // Assign to a GPR since the overflow return value is lowered to a SETcc. Register ResultReg2 = createResultReg(&X86::GR8RegClass); assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers."); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), ResultReg2).addImm(CondCode); updateValueMap(II, ResultReg, 2); @@ -3026,7 +3026,7 @@ return false; Register ResultReg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg) .addReg(Reg); updateValueMap(II, ResultReg); @@ -3125,7 +3125,7 @@ // Without this, EmitLiveInCopies may eliminate the livein if its only // use is a bitcast (which isn't turned into an instruction). Register ResultReg = createResultReg(RC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg) .addReg(DstReg, getKillRegState(true)); updateValueMap(&Arg, ResultReg); @@ -3295,7 +3295,7 @@ // Issue CALLSEQ_START unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown)) .addImm(NumBytes).addImm(0).addImm(0); // Walk the register/memloc assignments, inserting copies/loads. @@ -3385,7 +3385,7 @@ } if (VA.isRegLoc()) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg); OutRegs.push_back(VA.getLocReg()); } else { @@ -3426,7 +3426,7 @@ // GOT pointer. if (Subtarget->isPICStyleGOT()) { unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base); } @@ -3447,7 +3447,7 @@ unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs); assert((Subtarget->hasSSE1() || !NumXMMRegs) && "SSE registers cannot be used when SSE is disabled"); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri), X86::AL).addImm(NumXMMRegs); } @@ -3471,7 +3471,7 @@ if (CalleeOp) { // Register-indirect call. unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r; - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc)) + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc)) .addReg(CalleeOp); } else { // Direct call. @@ -3489,7 +3489,7 @@ ? (Is64Bit ? X86::CALL64m : X86::CALL32m) : (Is64Bit ? 
X86::CALL64pcrel32 : X86::CALLpcrel32); - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc)); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc)); if (NeedLoad) MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0); if (Symbol) @@ -3522,7 +3522,7 @@ ? NumBytes // Callee pops everything. : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CB); unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp)) .addImm(NumBytes).addImm(NumBytesForCalleeToPop); // Now handle call return values. @@ -3554,7 +3554,7 @@ } // Copy out the result. - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg); InRegs.push_back(VA.getLocReg()); @@ -3566,11 +3566,11 @@ unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64; unsigned MemSize = ResVT.getSizeInBits()/8; int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false); - addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc)), FI) .addReg(CopyReg); Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt; - addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg + i), FI); } } @@ -3659,7 +3659,7 @@ // with the wrong VT if we fall out of fast isel after selecting this. const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT); Register ResultReg = createResultReg(DstClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg); updateValueMap(I, ResultReg); @@ -3688,7 +3688,7 @@ return SrcReg; case MVT::i64: { Register ResultReg = createResultReg(&X86::GR64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg) .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit); return ResultReg; @@ -3772,10 +3772,10 @@ // Large code model only applies to 64-bit mode. 
if (Subtarget->is64Bit() && CM == CodeModel::Large) { Register AddrReg = createResultReg(&X86::GR64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri), AddrReg) .addConstantPoolIndex(CPI, 0, OpFlag); - MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); addRegReg(MIB, AddrReg, false, PICBase, false); MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( @@ -3785,7 +3785,7 @@ return ResultReg; } - addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg), CPI, PICBase, OpFlag); return ResultReg; @@ -3810,7 +3810,7 @@ TLI.getPointerTy(DL) == MVT::i64) { // The displacement code could be more than 32 bits away so we need to use // an instruction with a 64 bit immediate - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri), ResultReg) .addGlobalAddress(GV); } else { @@ -3818,7 +3818,7 @@ TLI.getPointerTy(DL) == MVT::i32 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r) : X86::LEA64r; - addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg), AM); } return ResultReg; @@ -3860,7 +3860,7 @@ if (Opc) { Register ResultReg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); return ResultReg; } @@ -3890,7 +3890,7 @@ : X86::LEA64r; const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL)); Register ResultReg = createResultReg(RC); - addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg), AM); return ResultReg; } @@ -3926,7 +3926,7 @@ } Register ResultReg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg); return ResultReg; } @@ -3990,18 +3990,18 @@ Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3); if (II.getNumDefs() >= 1) - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg) .addReg(Op0) .addReg(Op1) .addReg(Op2) .addReg(Op3); else { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II) .addReg(Op0) .addReg(Op1) .addReg(Op2) .addReg(Op3); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp --- a/llvm/utils/TableGen/FastISelEmitter.cpp +++ b/llvm/utils/TableGen/FastISelEmitter.cpp @@ -655,7 +655,7 @@ for (unsigned i = 0; i < Memo.PhysRegs.size(); ++i) { if (Memo.PhysRegs[i] != "") - OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, " + OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, " << "TII.get(TargetOpcode::COPY), " << Memo.PhysRegs[i] << ").addReg(Op" << i << ");\n"; }
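With the FastISelEmitter change above, even the TableGen-generated glue that copies operands into fixed physical registers routes through MIMD, so once the patch lands no fast-isel path builds an instruction from a bare stored DebugLoc. What ties the per-target hunks together is that MIMD is seeded once per IR instruction before selection; a hand-simplified sketch of that flow, following the shape of FastISel::selectInstruction (illustrative only, the real function body also handles call lowering and related special cases):

    // Sketch: seed MIMD from the IR instruction being selected, so every
    // BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, ...) call in the
    // hunks above inherits both its DebugLoc and its !pcsections node.
    bool FastISel::selectInstruction(const Instruction *I) {
      // Previously a plain `DbgLoc = I->getDebugLoc();`.
      MIMD = MIMetadata(*I);

      bool Done = selectOperator(I, I->getOpcode());

      // Clear it afterwards so no metadata leaks onto instructions
      // emitted on behalf of the next IR instruction.
      MIMD = {};
      return Done;
    }

The deliberate exceptions are easy to audit in the diff: interfaces that still take a plain DebugLoc, such as fastEmitBranch() and the debug-intrinsic asserts, receive MIMD.getDL() instead of growing a MIMetadata parameter, which keeps the whole change a plumbing-only patch.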