Index: include/llvm/CodeGen/MachineMemOperand.h
===================================================================
--- include/llvm/CodeGen/MachineMemOperand.h
+++ include/llvm/CodeGen/MachineMemOperand.h
@@ -295,6 +295,9 @@
   /// @{
   void print(raw_ostream &OS) const;
   void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST,
+             SmallVector<StringRef, 8> &SSNs, const LLVMContext &Context,
+             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
   /// @}
 
   friend bool operator==(const MachineMemOperand &LHS,
Index: lib/CodeGen/MIRPrinter.cpp
===================================================================
--- lib/CodeGen/MIRPrinter.cpp
+++ lib/CodeGen/MIRPrinter.cpp
@@ -19,7 +19,6 @@
 #include "llvm/ADT/SmallBitVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@@ -157,14 +156,10 @@
   void print(const MachineBasicBlock &MBB);
 
   void print(const MachineInstr &MI);
-  void printIRValueReference(const Value &V);
   void printStackObjectReference(int FrameIndex);
   void print(const MachineInstr &MI, unsigned OpIdx,
              const TargetRegisterInfo *TRI, bool ShouldPrintRegisterTies,
              LLT TypeToPrint, bool PrintDef = true);
-  void print(const LLVMContext &Context, const TargetInstrInfo &TII,
-             const MachineMemOperand &Op);
-  void printSyncScope(const LLVMContext &Context, SyncScope::ID SSID);
 };
 
 } // end namespace llvm
@@ -696,36 +691,17 @@
   if (!MI.memoperands_empty()) {
     OS << " :: ";
     const LLVMContext &Context = MF->getFunction().getContext();
+    const MachineFrameInfo &MFI = MF->getFrameInfo();
     bool NeedComma = false;
     for (const auto *Op : MI.memoperands()) {
       if (NeedComma)
         OS << ", ";
-      print(Context, *TII, *Op);
+      Op->print(OS, MST, SSNs, Context, &MFI, TII);
       NeedComma = true;
     }
   }
 }
 
-void MIPrinter::printIRValueReference(const Value &V) {
-  if (isa<GlobalValue>(V)) {
-    V.printAsOperand(OS, /*PrintType=*/false, MST);
-    return;
-  }
-  if (isa<Constant>(V)) {
-    // Machine memory operands can load/store to/from constant value pointers.
-    OS << '`';
-    V.printAsOperand(OS, /*PrintType=*/true, MST);
-    OS << '`';
-    return;
-  }
-  OS << "%ir.";
-  if (V.hasName()) {
-    printLLVMNameWithoutPrefix(OS, V.getName());
-    return;
-  }
-  MachineOperand::printIRSlotNumber(OS, MST.getLocalSlot(&V));
-}
-
 void MIPrinter::printStackObjectReference(int FrameIndex) {
   auto ObjectInfo = StackObjectOperandMapping.find(FrameIndex);
   assert(ObjectInfo != StackObjectOperandMapping.end() &&
@@ -786,134 +762,6 @@
   }
 }
 
-static const char *getTargetMMOFlagName(const TargetInstrInfo &TII,
-                                        unsigned TMMOFlag) {
-  auto Flags = TII.getSerializableMachineMemOperandTargetFlags();
-  for (const auto &I : Flags) {
-    if (I.first == TMMOFlag) {
-      return I.second;
-    }
-  }
-  return nullptr;
-}
-
-void MIPrinter::print(const LLVMContext &Context, const TargetInstrInfo &TII,
-                      const MachineMemOperand &Op) {
-  OS << '(';
-  if (Op.isVolatile())
-    OS << "volatile ";
-  if (Op.isNonTemporal())
-    OS << "non-temporal ";
-  if (Op.isDereferenceable())
-    OS << "dereferenceable ";
-  if (Op.isInvariant())
-    OS << "invariant ";
-  if (Op.getFlags() & MachineMemOperand::MOTargetFlag1)
-    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag1)
-       << "\" ";
-  if (Op.getFlags() & MachineMemOperand::MOTargetFlag2)
-    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag2)
-       << "\" ";
-  if (Op.getFlags() & MachineMemOperand::MOTargetFlag3)
-    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag3)
-       << "\" ";
-
-  assert((Op.isLoad() || Op.isStore()) &&
-         "machine memory operand must be a load or store (or both)");
-  if (Op.isLoad())
-    OS << "load ";
-  if (Op.isStore())
-    OS << "store ";
-
-  printSyncScope(Context, Op.getSyncScopeID());
-
-  if (Op.getOrdering() != AtomicOrdering::NotAtomic)
-    OS << toIRString(Op.getOrdering()) << ' ';
-  if (Op.getFailureOrdering() != AtomicOrdering::NotAtomic)
-    OS << toIRString(Op.getFailureOrdering()) << ' ';
-
-  OS << Op.getSize();
-  if (const Value *Val = Op.getValue()) {
-    OS << ((Op.isLoad() && Op.isStore()) ? " on "
-                                         : Op.isLoad() ? " from " : " into ");
-    printIRValueReference(*Val);
-  } else if (const PseudoSourceValue *PVal = Op.getPseudoValue()) {
-    OS << ((Op.isLoad() && Op.isStore()) ? " on "
-                                         : Op.isLoad() ? " from " : " into ");
-    assert(PVal && "Expected a pseudo source value");
-    switch (PVal->kind()) {
-    case PseudoSourceValue::Stack:
-      OS << "stack";
-      break;
-    case PseudoSourceValue::GOT:
-      OS << "got";
-      break;
-    case PseudoSourceValue::JumpTable:
-      OS << "jump-table";
-      break;
-    case PseudoSourceValue::ConstantPool:
-      OS << "constant-pool";
-      break;
-    case PseudoSourceValue::FixedStack:
-      printStackObjectReference(
-          cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex());
-      break;
-    case PseudoSourceValue::GlobalValueCallEntry:
-      OS << "call-entry ";
-      cast<GlobalValuePseudoSourceValue>(PVal)->getValue()->printAsOperand(
-          OS, /*PrintType=*/false, MST);
-      break;
-    case PseudoSourceValue::ExternalSymbolCallEntry:
-      OS << "call-entry &";
-      printLLVMNameWithoutPrefix(
-          OS, cast<ExternalSymbolPseudoSourceValue>(PVal)->getSymbol());
-      break;
-    case PseudoSourceValue::TargetCustom:
-      llvm_unreachable("TargetCustom pseudo source values are not supported");
-      break;
-    }
-  }
-  MachineOperand::printOperandOffset(OS, Op.getOffset());
-  if (Op.getBaseAlignment() != Op.getSize())
-    OS << ", align " << Op.getBaseAlignment();
-  auto AAInfo = Op.getAAInfo();
-  if (AAInfo.TBAA) {
-    OS << ", !tbaa ";
-    AAInfo.TBAA->printAsOperand(OS, MST);
-  }
-  if (AAInfo.Scope) {
-    OS << ", !alias.scope ";
-    AAInfo.Scope->printAsOperand(OS, MST);
-  }
-  if (AAInfo.NoAlias) {
-    OS << ", !noalias ";
-    AAInfo.NoAlias->printAsOperand(OS, MST);
-  }
-  if (Op.getRanges()) {
-    OS << ", !range ";
-    Op.getRanges()->printAsOperand(OS, MST);
-  }
-  if (unsigned AS = Op.getAddrSpace())
-    OS << ", addrspace " << AS;
-  OS << ')';
-}
-
-void MIPrinter::printSyncScope(const LLVMContext &Context, SyncScope::ID SSID) {
-  switch (SSID) {
-  case SyncScope::System: {
-    break;
-  }
-  default: {
-    if (SSNs.empty())
-      Context.getSyncScopeNames(SSNs);
-
-    OS << "syncscope(\"";
-    PrintEscapedString(SSNs[SSID], OS);
-    OS << "\") ";
-    break;
-  }
-  }
-}
-
 void llvm::printMIR(raw_ostream &OS, const Module &M) {
   yaml::Output Out(OS);
   Out << const_cast<Module &>(M);
Index: lib/CodeGen/MachineBasicBlock.cpp
===================================================================
--- lib/CodeGen/MachineBasicBlock.cpp
+++ lib/CodeGen/MachineBasicBlock.cpp
@@ -270,6 +270,7 @@
   const Function &F = MF->getFunction();
   const Module *M = F.getParent();
   ModuleSlotTracker MST(M);
+  MST.incorporateFunction(F);
   print(OS, MST, Indexes, IsStandalone);
 }
 
Index: lib/CodeGen/MachineInstr.cpp
===================================================================
--- lib/CodeGen/MachineInstr.cpp
+++ lib/CodeGen/MachineInstr.cpp
@@ -1437,25 +1437,34 @@
     }
   }
 
-  bool HaveSemi = false;
   if (!memoperands_empty()) {
-    if (!HaveSemi) {
-      OS << ";";
-      HaveSemi = true;
+    SmallVector<StringRef, 8> SSNs;
+    const LLVMContext *Context = nullptr;
+    std::unique_ptr<LLVMContext> CtxPtr;
+    const MachineFrameInfo *MFI = nullptr;
+    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
+      MFI = &MF->getFrameInfo();
+      Context = &MF->getFunction().getContext();
+    }
+    if (!Context) {
+      CtxPtr = llvm::make_unique<LLVMContext>();
+      Context = CtxPtr.get();
     }
-    OS << " mem:";
-    for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
-         i != e; ++i) {
-      (*i)->print(OS, MST);
-      if (std::next(i) != e)
-        OS << " ";
+
+    OS << " :: ";
+    bool NeedComma = false;
+    for (const MachineMemOperand *Op : memoperands()) {
+      if (NeedComma)
+        OS << ", ";
+      Op->print(OS, MST, SSNs, *Context, MFI, TII);
+      NeedComma = true;
     }
   }
 
   if (SkipDebugLoc)
     return;
 
+  bool HaveSemi = false;
   // Print debug location information.
   if (isDebugValue() && getOperand(e - 2).isMetadata()) {
     if (!HaveSemi)
Index: lib/CodeGen/MachineOperand.cpp
===================================================================
--- lib/CodeGen/MachineOperand.cpp
+++ lib/CodeGen/MachineOperand.cpp
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/CodeGen/MIRPrinter.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -440,6 +441,57 @@
     OS << "<unknown>";
 }
 
+static void printIRValueReference(raw_ostream &OS, const Value &V,
+                                  ModuleSlotTracker &MST) {
+  if (isa<GlobalValue>(V)) {
+    V.printAsOperand(OS, /*PrintType=*/false, MST);
+    return;
+  }
+  if (isa<Constant>(V)) {
+    // Machine memory operands can load/store to/from constant value pointers.
+    OS << '`';
+    V.printAsOperand(OS, /*PrintType=*/true, MST);
+    OS << '`';
+    return;
+  }
+  OS << "%ir.";
+  if (V.hasName()) {
+    printLLVMNameWithoutPrefix(OS, V.getName());
+    return;
+  }
+  MachineOperand::printIRSlotNumber(OS, MST.getLocalSlot(&V));
+}
+
+static void printSyncScope(raw_ostream &OS, const LLVMContext &Context,
+                           SyncScope::ID SSID,
+                           SmallVector<StringRef, 8> &SSNs) {
+  switch (SSID) {
+  case SyncScope::System: {
+    break;
+  }
+  default: {
+    if (SSNs.empty())
+      Context.getSyncScopeNames(SSNs);
+
+    OS << "syncscope(\"";
+    PrintEscapedString(SSNs[SSID], OS);
+    OS << "\") ";
+    break;
+  }
+  }
+}
+
+static const char *getTargetMMOFlagName(const TargetInstrInfo &TII,
+                                        unsigned TMMOFlag) {
+  auto Flags = TII.getSerializableMachineMemOperandTargetFlags();
+  for (const auto &I : Flags) {
+    if (I.first == TMMOFlag) {
+      return I.second;
+    }
+  }
+  return nullptr;
+}
+
 void MachineOperand::printSubRegIdx(raw_ostream &OS, uint64_t Index,
                                     const TargetRegisterInfo *TRI) {
   OS << "%subreg.";
@@ -715,7 +767,7 @@
     break;
   case MachineOperand::MO_FrameIndex: {
     int FrameIndex = getIndex();
-    bool IsFixed = false;
+    bool IsFixed = true;
     StringRef Name;
     if (const MachineFunction *MF = getMFIfAvailable(*this)) {
       const MachineFrameInfo &MFI = MF->getFrameInfo();
@@ -961,108 +1013,125 @@
   ModuleSlotTracker DummyMST(nullptr);
   print(OS, DummyMST);
 }
+
 void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST) const {
-  assert((isLoad() || isStore()) && "SV has to be a load, store or both.");
+  SmallVector<StringRef, 8> SSNs;
+  LLVMContext Ctx;
+  print(OS, MST, SSNs, Ctx, nullptr, nullptr);
+}
 
+void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
+                              SmallVector<StringRef, 8> &SSNs,
+                              const LLVMContext &Context,
+                              const MachineFrameInfo *MFI,
+                              const TargetInstrInfo *TII) const {
+  OS << '(';
   if (isVolatile())
-    OS << "Volatile ";
-
+    OS << "volatile ";
+  if (isNonTemporal())
+    OS << "non-temporal ";
+  if (isDereferenceable())
+    OS << "dereferenceable ";
+  if (isInvariant())
+    OS << "invariant ";
+  if (getFlags() & MachineMemOperand::MOTargetFlag1)
+    OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag1)
+       << "\" ";
+  if (getFlags() & MachineMemOperand::MOTargetFlag2)
+    OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag2)
+       << "\" ";
+  if (getFlags() & MachineMemOperand::MOTargetFlag3)
+    OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag3)
+       << "\" ";
+
+  assert((isLoad() || isStore()) &&
+         "machine memory operand must be a load or store (or both)");
   if (isLoad())
-    OS << "LD";
+    OS << "load ";
   if (isStore())
-    OS << "ST";
-  OS << getSize();
+    OS << "store ";
 
-  // Print the address information.
- OS << "["; - if (const Value *V = getValue()) - V->printAsOperand(OS, /*PrintType=*/false, MST); - else if (const PseudoSourceValue *PSV = getPseudoValue()) - PSV->printCustom(OS); - else - OS << ""; + printSyncScope(OS, Context, getSyncScopeID(), SSNs); - unsigned AS = getAddrSpace(); - if (AS != 0) - OS << "(addrspace=" << AS << ')'; - - // If the alignment of the memory reference itself differs from the alignment - // of the base pointer, print the base alignment explicitly, next to the base - // pointer. - if (getBaseAlignment() != getAlignment()) - OS << "(align=" << getBaseAlignment() << ")"; - - if (getOffset() != 0) - OS << "+" << getOffset(); - OS << "]"; - - // Print the alignment of the reference. - if (getBaseAlignment() != getAlignment() || getBaseAlignment() != getSize()) - OS << "(align=" << getAlignment() << ")"; - - // Print TBAA info. - if (const MDNode *TBAAInfo = getAAInfo().TBAA) { - OS << "(tbaa="; - if (TBAAInfo->getNumOperands() > 0) - TBAAInfo->getOperand(0)->printAsOperand(OS, MST); - else - OS << ""; - OS << ")"; - } + if (getOrdering() != AtomicOrdering::NotAtomic) + OS << toIRString(getOrdering()) << ' '; + if (getFailureOrdering() != AtomicOrdering::NotAtomic) + OS << toIRString(getFailureOrdering()) << ' '; - // Print AA scope info. - if (const MDNode *ScopeInfo = getAAInfo().Scope) { - OS << "(alias.scope="; - if (ScopeInfo->getNumOperands() > 0) - for (unsigned i = 0, ie = ScopeInfo->getNumOperands(); i != ie; ++i) { - ScopeInfo->getOperand(i)->printAsOperand(OS, MST); - if (i != ie - 1) - OS << ","; + OS << getSize(); + if (const Value *Val = getValue()) { + OS << ((isLoad() && isStore()) ? " on " : isLoad() ? " from " : " into "); + printIRValueReference(OS, *Val, MST); + } else if (const PseudoSourceValue *PVal = getPseudoValue()) { + OS << ((isLoad() && isStore()) ? " on " : isLoad() ? " from " : " into "); + assert(PVal && "Expected a pseudo source value"); + switch (PVal->kind()) { + case PseudoSourceValue::Stack: + OS << "stack"; + break; + case PseudoSourceValue::GOT: + OS << "got"; + break; + case PseudoSourceValue::JumpTable: + OS << "jump-table"; + break; + case PseudoSourceValue::ConstantPool: + OS << "constant-pool"; + break; + case PseudoSourceValue::FixedStack: { + int FrameIndex = cast(PVal)->getFrameIndex(); + bool IsFixed = true; + StringRef Name; + if (MFI) { + IsFixed = MFI->isFixedObjectIndex(FrameIndex); + if (const AllocaInst *Alloca = MFI->getObjectAllocation(FrameIndex)) + if (Alloca->hasName()) + Name = Alloca->getName(); + if (IsFixed) + FrameIndex -= MFI->getObjectIndexBegin(); } - else - OS << ""; - OS << ")"; + MachineOperand::printStackObjectReference(OS, FrameIndex, IsFixed, Name); + break; + } + case PseudoSourceValue::GlobalValueCallEntry: + OS << "call-entry "; + cast(PVal)->getValue()->printAsOperand( + OS, /*PrintType=*/false, MST); + break; + case PseudoSourceValue::ExternalSymbolCallEntry: + OS << "call-entry &"; + printLLVMNameWithoutPrefix( + OS, cast(PVal)->getSymbol()); + break; + case PseudoSourceValue::TargetCustom: + llvm_unreachable("TargetCustom pseudo source values are not supported"); + break; + } } - - // Print AA noalias scope info. 
-  if (const MDNode *NoAliasInfo = getAAInfo().NoAlias) {
-    OS << "(noalias=";
-    if (NoAliasInfo->getNumOperands() > 0)
-      for (unsigned i = 0, ie = NoAliasInfo->getNumOperands(); i != ie; ++i) {
-        NoAliasInfo->getOperand(i)->printAsOperand(OS, MST);
-        if (i != ie - 1)
-          OS << ",";
-      }
-    else
-      OS << "<unknown>";
-    OS << ")";
+  MachineOperand::printOperandOffset(OS, getOffset());
+  if (getBaseAlignment() != getSize())
+    OS << ", align " << getBaseAlignment();
+  auto AAInfo = getAAInfo();
+  if (AAInfo.TBAA) {
+    OS << ", !tbaa ";
+    AAInfo.TBAA->printAsOperand(OS, MST);
   }
-
-  if (const MDNode *Ranges = getRanges()) {
-    unsigned NumRanges = Ranges->getNumOperands();
-    if (NumRanges != 0) {
-      OS << "(ranges=";
-
-      for (unsigned I = 0; I != NumRanges; ++I) {
-        Ranges->getOperand(I)->printAsOperand(OS, MST);
-        if (I != NumRanges - 1)
-          OS << ',';
-      }
-
-      OS << ')';
-    }
+  if (AAInfo.Scope) {
+    OS << ", !alias.scope ";
+    AAInfo.Scope->printAsOperand(OS, MST);
+  }
+  if (AAInfo.NoAlias) {
+    OS << ", !noalias ";
+    AAInfo.NoAlias->printAsOperand(OS, MST);
+  }
+  if (getRanges()) {
+    OS << ", !range ";
+    getRanges()->printAsOperand(OS, MST);
   }
+  // FIXME: Implement addrspace printing/parsing in MIR.
+  // For now, print this even though parsing it is not available in MIR.
+  if (unsigned AS = getAddrSpace())
+    OS << ", addrspace " << AS;
 
-  if (isNonTemporal())
-    OS << "(nontemporal)";
-  if (isDereferenceable())
-    OS << "(dereferenceable)";
-  if (isInvariant())
-    OS << "(invariant)";
-  if (getFlags() & MOTargetFlag1)
-    OS << "(flag1)";
-  if (getFlags() & MOTargetFlag2)
-    OS << "(flag2)";
-  if (getFlags() & MOTargetFlag3)
-    OS << "(flag3)";
+  OS << ')';
 }
Index: lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -34,6 +34,7 @@
 #include "llvm/IR/DebugLoc.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/ModuleSlotTracker.h"
 #include "llvm/IR/Value.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
@@ -401,6 +402,19 @@
   });
 }
 
+// Print the MMO with more information from the SelectionDAG.
+static void printMemOperand(raw_ostream &OS, const MachineMemOperand &MMO,
+                            const SelectionDAG *G) {
+  const MachineFunction &MF = G->getMachineFunction();
+  const Function &F = MF.getFunction();
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetInstrInfo *TII = G->getSubtarget().getInstrInfo();
+  ModuleSlotTracker MST(F.getParent());
+  MST.incorporateFunction(F);
+  SmallVector<StringRef, 8> SSNs;
+  MMO.print(OS, MST, SSNs, *G->getContext(), &MFI, TII);
+}
+
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
 LLVM_DUMP_METHOD void SDNode::dump() const { dump(nullptr); }
 
@@ -457,7 +471,7 @@
       OS << "<Mem:";
       for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
            e = MN->memoperands_end(); i != e; ++i) {
-        OS << **i;
+        printMemOperand(OS, **i, G);
         if (std::next(i) != e)
           OS << " ";
       }
@@ -549,7 +563,9 @@
     OS << ":" << N->getVT().getEVTString();
   } else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
-    OS << "<" << *LD->getMemOperand();
+    OS << "<";
+
+    printMemOperand(OS, *LD->getMemOperand(), G);
 
     bool doExt = true;
     switch (LD->getExtensionType()) {
@@ -567,7 +583,8 @@
     OS << ">";
   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
-    OS << "<" << *ST->getMemOperand();
+    OS << "<";
+    printMemOperand(OS, *ST->getMemOperand(), G);
 
     if (ST->isTruncatingStore())
       OS << ", trunc to " << ST->getMemoryVT().getEVTString();
@@ -578,7 +595,9 @@
     OS << ">";
   } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
-    OS << "<" << *M->getMemOperand() << ">";
+    OS << "<";
+    printMemOperand(OS, *M->getMemOperand(), G);
+    OS << ">";
   } else if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(this)) {
     int64_t offset = BA->getOffset();
Index: test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -46,7 +46,7 @@
 ; The key problem here is that we may fail to create an MBB referenced by a
 ; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
 ; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6:gpr(s32), %2:gpr(p0); mem:ST4[%addr] (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6:gpr(s32), %2:gpr(p0) :: (store seq_cst 4 into %ir.addr) (in function: pending_phis)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
 ; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
 define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -66,7 +66,7 @@
 }
 
 ; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s42), %0:_(p0); mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s42), %0:_(p0) :: (store 6 into %ir.addr, align 8) (in function: odd_type)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
 define void @odd_type(i42* %addr) {
@@ -75,7 +75,7 @@
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<7 x s32>), %0:_(p0); mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<7 x s32>), %0:_(p0) :: (store 28 into %ir.addr, align 32) (in function: odd_vector)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
 define void @odd_vector(<7 x i32>* %addr) {
@@ -94,7 +94,7 @@
 }
 
 ; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0:gpr(p0); mem:LD8[%addr] (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0:gpr(p0) :: (load seq_cst 8 from %ir.addr) (in function: atomic_ops)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
 ; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
 define i64 @atomic_ops(i64* %addr) {
@@ -159,7 +159,7 @@
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(<2 x p0>), %4:_(p0); mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(<2 x p0>), %4:_(p0) :: (store 16 into `<2 x i16*>* undef`) (in function: vector_of_pointers_insertelement)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
 ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
 define void @vector_of_pointers_insertelement() {
@@ -175,7 +175,7 @@
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s96), %3:_(p0); mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s96), %3:_(p0) :: (store 12 into `%struct96* undef`, align 4) (in function: nonpow2_insertvalue_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
 %struct96 = type { float, float, float }
@@ -185,7 +185,7 @@
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing:
 define void @nonpow2_add_narrowing() {
@@ -196,7 +196,7 @@
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing:
 define void @nonpow2_or_narrowing() {
@@ -207,7 +207,7 @@
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_load_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing:
 define void @nonpow2_load_narrowing() {
@@ -216,7 +216,7 @@
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0); mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0) :: (store 12 into %ir.c, align 16) (in function: nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
 define void @nonpow2_store_narrowing(i96* %c) {
@@ -226,7 +226,7 @@
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0); mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0) :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_constant_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
 define void @nonpow2_constant_narrowing() {
@@ -236,7 +236,7 @@
 
 ; Currently can't handle vector lengths that aren't an exact multiple of
 ; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1:_(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1:_(<7 x s64>) = G_ADD %0, %0 (in function: nonpow2_vector_add_fewerelements
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1:_(<7 x s64>), %3:_(s64) (in function: nonpow2_vector_add_fewerelements)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
Index: test/CodeGen/AArch64/aarch64-stp-cluster.ll
===================================================================
--- test/CodeGen/AArch64/aarch64-stp-cluster.ll
+++ test/CodeGen/AArch64/aarch64-stp-cluster.ll
@@ -130,10 +130,10 @@
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: stp_volatile:%bb.0
 ; CHECK-NOT: Cluster ld/st
-; CHECK:SU(2): STRXui %1:gpr64, %0:gpr64common, 3; mem:Volatile
-; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2; mem:Volatile
-; CHECK:SU(4): STRXui %1:gpr64, %0:gpr64common, 1; mem:Volatile
-; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4; mem:Volatile
+; CHECK:SU(2): STRXui %1:gpr64, %0:gpr64common, 3 :: (volatile
+; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2 :: (volatile
+; CHECK:SU(4): STRXui %1:gpr64, %0:gpr64common, 1 :: (volatile
+; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4 :: (volatile
 define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
Index: test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
===================================================================
--- test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -5,14 +5,14 @@
 ;
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: misched_bug:%bb.0 entry
-; CHECK: SU(2): %2:gpr32 = LDRWui %0:gpr64common, 1; mem:LD4[%ptr1_plus1]
+; CHECK: SU(2): %2:gpr32 = LDRWui %0:gpr64common, 1 :: (load 4 from %ir.ptr1_plus1)
 ; CHECK: Successors:
 ; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2
 ; CHECK-NEXT: SU(4): Ord Latency=0
-; CHECK: SU(3): STRWui %wzr, %0:gpr64common, 0; mem:ST4[%ptr1]
+; CHECK: SU(3): STRWui %wzr, %0:gpr64common, 0 :: (store 4 into %ir.ptr1)
 ; CHECK: Successors:
 ; CHECK: SU(4): Ord Latency=0
-; CHECK: SU(4): STRWui %wzr, %1:gpr64common, 0; mem:ST4[%ptr2]
+; CHECK: SU(4): STRWui %wzr, %1:gpr64common, 0 :: (store 4 into %ir.ptr2)
 ; CHECK: SU(5): %w0 = COPY %2
 ; CHECK: ** ScheduleDAGMI::schedule picking next node
 define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
Index: test/CodeGen/AMDGPU/extload-align.ll
===================================================================
--- test/CodeGen/AMDGPU/extload-align.ll
+++ test/CodeGen/AMDGPU/extload-align.ll
@@ -7,7 +7,7 @@
 ; size and not 4 corresponding to the sign-extended size (i32).
 
 ; DEBUG: {{^}}# Machine code for function extload_align:
-; DEBUG: mem:LD2[<unknown>(addrspace=5)]
+; DEBUG: (load 2, addrspace 5)
 ; DEBUG: {{^}}# End machine code for function extload_align.
 
 define amdgpu_kernel void @extload_align(i32 addrspace(5)* %out, i32 %index) #0 {
Index: test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
===================================================================
--- test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
+++ test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
@@ -5,11 +5,11 @@
 ; latency regardless of whether they are barriers or not.
 
 ; CHECK: ** List Scheduling
-; CHECK: SU(2){{.*}}STR{{.*}}Volatile
+; CHECK: SU(2){{.*}}STR{{.*}}(volatile
 ; CHECK-NOT: SU({{.*}}): Ord
 ; CHECK: SU(3): Ord Latency=1
 ; CHECK-NOT: SU({{.*}}): Ord
-; CHECK: SU(3){{.*}}LDR{{.*}}Volatile
+; CHECK: SU(3){{.*}}LDR{{.*}}(volatile
 ; CHECK-NOT: SU({{.*}}): Ord
 ; CHECK: SU(2): Ord Latency=1
 ; CHECK-NOT: SU({{.*}}): Ord
Index: test/CodeGen/ARM/ldrd-memoper.ll
===================================================================
--- test/CodeGen/ARM/ldrd-memoper.ll
+++ test/CodeGen/ARM/ldrd-memoper.ll
@@ -5,7 +5,7 @@
 
 @b = external global i64*
 
-; CHECK: Formed {{.*}} t2LDRD{{.*}} mem:LD4[%0] LD4[%0+4]
+; CHECK: Formed {{.*}} t2LDRD{{.*}} (load 4 from %ir.0), (load 4 from %ir.0 + 4)
 define i64 @t(i64 %a) nounwind readonly {
 entry:
   %0 = load i64*, i64** @b, align 4
Index: test/CodeGen/ARM/misched-int-basic-thumb2.mir
===================================================================
--- test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -42,7 +42,7 @@
 # CHECK_SWIFT: Latency : 2
 # CHECK_R52: Latency : 2
 #
-# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, %noreg; mem:LD4[@g1](dereferenceable)
+# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, %noreg :: (dereferenceable load 4 from @g1)
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 3
 # CHECK_R52: Latency : 4
@@ -57,7 +57,7 @@
 # CHECK_SWIFT: Latency : 14
 # CHECK_R52: Latency : 8
 
-# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, %noreg; mem:ST4[@g1]
+# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, %noreg :: (store 4 into @g1)
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 0
 # CHECK_R52: Latency : 4
Index: test/CodeGen/ARM/single-issue-r52.mir
===================================================================
--- test/CodeGen/ARM/single-issue-r52.mir
+++ test/CodeGen/ARM/single-issue-r52.mir
@@ -20,7 +20,7 @@
 
 # CHECK: ********** MI Scheduling **********
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, %noreg; mem:LD32[%A](align=8)
+# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, %noreg :: (load 32 from %ir.A, align 8)
 # CHECK: Latency : 8
 # CHECK: Single Issue : true;
 # CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0:qqpr, %1.dsub_1:qqpr, 14, %noreg
Index: test/CodeGen/Hexagon/post-inc-aa-metadata.ll
===================================================================
--- test/CodeGen/Hexagon/post-inc-aa-metadata.ll
+++ test/CodeGen/Hexagon/post-inc-aa-metadata.ll
@@ -3,7 +3,7 @@
 ; Check that the generated post-increment load has TBAA information.
 
 ; CHECK-LABEL: Machine code for function fred:
-; CHECK: = V6_vL32b_pi %{{[0-9]+}}, 64; mem:LD64[{{.*}}](tbaa=
+; CHECK: = V6_vL32b_pi %{{[0-9]+}}, 64 :: (load 64{{.*}}!tbaa
 
 target triple = "hexagon"
Index: test/CodeGen/PowerPC/byval-agg-info.ll
===================================================================
--- test/CodeGen/PowerPC/byval-agg-info.ll
+++ test/CodeGen/PowerPC/byval-agg-info.ll
@@ -12,6 +12,6 @@
 }
 
 ; Make sure that the MMO on the store has no offset from the byval
-; variable itself (we used to have mem:ST8[%v+64]).
-; CHECK: STD killed renamable %x5, 176, %x1; mem:ST8[%v](align=16)
+; variable itself (we used to have :: (store 8 into %ir.v + 64, align 16)).
+; CHECK: STD killed renamable %x5, 176, %x1 :: (store 8 into %ir.v, align 16)
Index: test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
===================================================================
--- test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
+++ test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
@@ -9,13 +9,13 @@
 ; so we expect the LD8 to load from the address used in the original HIBITS
 ; load.
 ; CHECK-LABEL: Initial selection DAG:
-; CHECK-DAG: [[LOBITS:t[0-9]+]]: i32,ch = load
-; CHECK-DAG: [[HIBITS:t[0-9]+]]: i32,ch = load
+; CHECK-DAG: [[LOBITS:t[0-9]+]]: i32,ch = load<(load 4 from %fixed-stack.1)>
+; CHECK-DAG: [[HIBITS:t[0-9]+]]: i32,ch = load<(load 4 from %fixed-stack.2)>
 ; CHECK: Combining: t{{[0-9]+}}: i64 = build_pair [[LOBITS]], [[HIBITS]]
 ; CHECK-NEXT: Creating new node
-; CHECK-SAME: load
 ; CHECK-NEXT: into
-; CHECK-SAME: load
 ; CHECK-LABEL: Optimized lowered selection DAG:
   %result = extractvalue {i64, i8* } %struct, 0
   ret i64 %result
Index: test/CodeGen/PowerPC/unal-vec-negarith.ll
===================================================================
--- test/CodeGen/PowerPC/unal-vec-negarith.ll
+++ test/CodeGen/PowerPC/unal-vec-negarith.ll
@@ -9,8 +9,8 @@
   %r = load <16 x i8>, <16 x i8>* %p, align 1
   ret <16 x i8> %r
 
-; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}
-; CHECK: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}
+; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load 31 from %ir.p + 4294967281, align 1)>
+; CHECK: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load 31 from %ir.p - 15, align 1)>
 }
 
 attributes #0 = { nounwind "target-cpu"="pwr7" }
Index: test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
===================================================================
--- test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
 ; the fallback path.
 
 ; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0); mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0) :: (store 10 into %ir.ptr, align 16) (in function: test_x86_fp80_dump)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
 ; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
 define void @test_x86_fp80_dump(x86_fp80* %ptr){
Index: test/CodeGen/X86/merge-store-partially-alias-loads.ll
===================================================================
--- test/CodeGen/X86/merge-store-partially-alias-loads.ll
+++ test/CodeGen/X86/merge-store-partially-alias-loads.ll
@@ -18,12 +18,12 @@
 ; DBGDAG-DAG: [[BASEPTR:t[0-9]+]]: i64,ch = CopyFromReg [[ENTRYTOKEN]],
 ; DBGDAG-DAG: [[ADDPTR:t[0-9]+]]: i64 = add {{(nuw )?}}[[BASEPTR]], Constant:i64<2>
 
-; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load [[ENTRYTOKEN]], [[BASEPTR]], undef:i64
-; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load [[ENTRYTOKEN]], [[ADDPTR]], undef:i64
+; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<(load 2 from %ir.tmp81, align 1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64
+; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<(load 1 from %ir.tmp12)> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64
 
-; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64
+; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<(store 1 into %ir.tmp14)> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64
 ; DBGDAG-DAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1
-; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64
+; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<(store 2 into %ir.tmp10, align 1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64
 
 ; DBGDAG: X86ISD::RET_FLAG t{{[0-9]+}},
Index: test/CodeGen/X86/stack-protector-weight.ll
===================================================================
--- test/CodeGen/X86/stack-protector-weight.ll
+++ test/CodeGen/X86/stack-protector-weight.ll
@@ -16,16 +16,16 @@
 ; DARWIN-IR: CALL64pcrel32 @__stack_chk_fail
 
 ; MSVC-SELDAG: # Machine code for function test_branch_weights:
-; MSVC-SELDAG: mem:Volatile LD4[@__security_cookie]
-; MSVC-SELDAG: ST4[FixedStack0]
-; MSVC-SELDAG: LD4[FixedStack0]
+; MSVC-SELDAG: :: (volatile load 4 from @__security_cookie)
+; MSVC-SELDAG: (store 4 into stack)
+; MSVC-SELDAG: (volatile load 4 from %stack.0.StackGuardSlot)
 ; MSVC-SELDAG: CALLpcrel32 @__security_check_cookie
 
 ; MSVC always uses selection DAG now.
 ; MSVC-IR: # Machine code for function test_branch_weights:
-; MSVC-IR: mem:Volatile LD4[@__security_cookie]
-; MSVC-IR: ST4[FixedStack0]
-; MSVC-IR: LD4[FixedStack0]
+; MSVC-IR: :: (volatile load 4 from @__security_cookie)
+; MSVC-IR: (store 4 into stack)
+; MSVC-IR: (volatile load 4 from %stack.0.StackGuardSlot)
 ; MSVC-IR: CALLpcrel32 @__security_check_cookie
 
 define i32 @test_branch_weights(i32 %n) #0 {
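
Usage sketch (illustrative, not part of the diff): the extended MachineMemOperand::print overload declared in MachineMemOperand.h is the one the new call sites in MIRPrinter.cpp, MachineInstr.cpp and SelectionDAGDumper.cpp use above. A caller elsewhere would look roughly like the following, assuming the instruction is attached to a MachineFunction; the helper name printMemOperandsOf is made up for the example.

// Sketch of a caller of the new MachineMemOperand::print overload.
// Mirrors the call sites added in this patch; the helper itself is
// hypothetical and not part of the change.
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void printMemOperandsOf(raw_ostream &OS, const MachineInstr &MI) {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const Function &F = MF.getFunction();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  // Incorporate the IR function so unnamed IR values print as %ir.<slot>.
  ModuleSlotTracker MST(F.getParent());
  MST.incorporateFunction(F);
  // Cache of sync scope names; filled lazily by print() when needed.
  SmallVector<StringRef, 8> SSNs;

  bool NeedComma = false;
  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (NeedComma)
      OS << ", ";
    // Emits the unified "(load 4 from %ir.x, align 8)" style syntax.
    MMO->print(OS, MST, SSNs, F.getContext(), &MFI, TII);
    NeedComma = true;
  }
}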