Index: llvm/trunk/docs/MIRLangRef.rst
===================================================================
--- llvm/trunk/docs/MIRLangRef.rst
+++ llvm/trunk/docs/MIRLangRef.rst
@@ -246,13 +246,25 @@
 .. code-block:: text

-    %bb.<id>[.<name>]
+    %bb.<id>

-Examples:
+Example:

 .. code-block:: llvm

     %bb.0
+
+The following syntax is also supported, but the former syntax is preferred for
+block references:
+
+.. code-block:: text
+
+    %bb.<id>[.<name>]
+
+Example:
+
+.. code-block:: llvm
+
+    %bb.1.then

 Successors
Index: llvm/trunk/docs/NVPTXUsage.rst
===================================================================
--- llvm/trunk/docs/NVPTXUsage.rst
+++ llvm/trunk/docs/NVPTXUsage.rst
@@ -499,7 +499,7 @@
   .reg .s32 %r<2>;
   .reg .s64 %rl<8>;

-// BB#0: // %entry
+// %bb.0: // %entry
   ld.param.u64 %rl1, [kernel_param_0];
   mov.u32 %r1, %tid.x;
   mul.wide.s32 %rl2, %r1, 4;
@@ -897,7 +897,7 @@
   .reg .s32 %r<21>;
   .reg .s64 %rl<8>;

-// BB#0: // %entry
+// %bb.0: // %entry
   ld.param.u64 %rl2, [kernel_param_0];
   mov.u32 %r3, %tid.x;
   ld.param.u64 %rl3, [kernel_param_1];
@@ -921,7 +921,7 @@
   abs.f32 %f4, %f1;
   setp.gtu.f32 %p4, %f4, 0f7F800000;
   @%p4 bra BB0_4;
-// BB#3: // %__nv_isnanf.exit5.i
+// %bb.3: // %__nv_isnanf.exit5.i
   abs.f32 %f5, %f2;
   setp.le.f32 %p5, %f5, 0f7F800000;
   @%p5 bra BB0_5;
@@ -953,7 +953,7 @@
   selp.f32 %f110, 0f7F800000, %f99, %p16;
   setp.eq.f32 %p17, %f110, 0f7F800000;
   @%p17 bra BB0_28;
-// BB#27:
+// %bb.27:
   fma.rn.f32 %f110, %f110, %f108, %f110;
 BB0_28: // %__internal_accurate_powf.exit.i
   setp.lt.f32 %p18, %f1, 0f00000000;
Index: llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h
+++ llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h
@@ -25,6 +25,7 @@
 #include "llvm/MC/LaneBitmask.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Printable.h"
 #include <cassert>
 #include <cstdint>
 #include <functional>
@@ -771,6 +772,14 @@
 raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);

+/// Prints a machine basic block reference.
+///
+/// The format is:
+///   %bb.5 - a machine basic block with MBB.getNumber() == 5.
+///
+/// Usage: OS << printMBBReference(MBB) << '\n';
+Printable printMBBReference(const MachineBasicBlock &MBB);
+
 // This is useful when building IndexedMaps keyed on basic block pointers.
 struct MBB2NumberFunctor {
   using argument_type = const MachineBasicBlock *;
Index: llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2710,7 +2710,8 @@
       (isBlockOnlyReachableByFallthrough(&MBB) && !MBB.isEHFuncletEntry())) {
     if (isVerbose()) {
       // NOTE: Want this comment at start of line, don't emit with AddComment.
-      OutStreamer->emitRawComment(" BB#" + Twine(MBB.getNumber()) + ":", false);
+      OutStreamer->emitRawComment(" %bb."
+ Twine(MBB.getNumber()) + ":", + false); } } else { OutStreamer->EmitLabel(MBB.getSymbol()); Index: llvm/trunk/lib/CodeGen/BranchFolding.cpp =================================================================== --- llvm/trunk/lib/CodeGen/BranchFolding.cpp +++ llvm/trunk/lib/CodeGen/BranchFolding.cpp @@ -613,8 +613,8 @@ CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2); if (CommonTailLen == 0) return false; - DEBUG(dbgs() << "Common tail length of BB#" << MBB1->getNumber() - << " and BB#" << MBB2->getNumber() << " is " << CommonTailLen + DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1) + << " and " << printMBBReference(*MBB2) << " is " << CommonTailLen << '\n'); // It's almost always profitable to merge any number of non-terminator @@ -770,7 +770,7 @@ SameTails[commonTailIndex].getTailStartPos(); MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock(); - DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size " + DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size " << maxCommonTailLength); // If the split block unconditionally falls-thru to SuccBB, it will be @@ -920,20 +920,17 @@ bool MadeChange = false; DEBUG(dbgs() << "\nTryTailMergeBlocks: "; - for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) - dbgs() << "BB#" << MergePotentials[i].getBlock()->getNumber() - << (i == e-1 ? "" : ", "); - dbgs() << "\n"; - if (SuccBB) { - dbgs() << " with successor BB#" << SuccBB->getNumber() << '\n'; + for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs() + << printMBBReference(*MergePotentials[i].getBlock()) + << (i == e - 1 ? "" : ", "); + dbgs() << "\n"; if (SuccBB) { + dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n'; if (PredBB) - dbgs() << " which has fall-through from BB#" - << PredBB->getNumber() << "\n"; - } - dbgs() << "Looking for common tails of at least " - << MinCommonTailLength << " instruction" - << (MinCommonTailLength == 1 ? "" : "s") << '\n'; - ); + dbgs() << " which has fall-through from " + << printMBBReference(*PredBB) << "\n"; + } dbgs() << "Looking for common tails of at least " + << MinCommonTailLength << " instruction" + << (MinCommonTailLength == 1 ? "" : "s") << '\n';); // Sort by hash value so that blocks with identical end sequences sort // together. @@ -1013,13 +1010,13 @@ // MBB is common tail. Adjust all other BB's to jump to this one. // Traversal must be forwards so erases work. - DEBUG(dbgs() << "\nUsing common tail in BB#" << MBB->getNumber() + DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB) << " for "); for (unsigned int i=0, e = SameTails.size(); i != e; ++i) { if (commonTailIndex == i) continue; - DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber() - << (i == e-1 ? "" : ", ")); + DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock()) + << (i == e - 1 ? "" : ", ")); // Hack the end off BB i, making it jump to BB commonTailIndex instead. replaceTailWithBranchTo(SameTails[i].getTailStartPos(), *MBB); // BB i is no longer a predecessor of SuccBB; remove it from the worklist. 
Index: llvm/trunk/lib/CodeGen/BranchRelaxation.cpp =================================================================== --- llvm/trunk/lib/CodeGen/BranchRelaxation.cpp +++ llvm/trunk/lib/CodeGen/BranchRelaxation.cpp @@ -143,7 +143,7 @@ LLVM_DUMP_METHOD void BranchRelaxation::dumpBBs() { for (auto &MBB : *MF) { const BasicBlockInfo &BBI = BlockInfo[MBB.getNumber()]; - dbgs() << format("BB#%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset) + dbgs() << format("%bb.%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset) << format("size=%#x\n", BBI.Size); } } @@ -287,13 +287,10 @@ if (TII->isBranchOffsetInRange(MI.getOpcode(), DestOffset - BrOffset)) return true; - DEBUG( - dbgs() << "Out of range branch to destination BB#" << DestBB.getNumber() - << " from BB#" << MI.getParent()->getNumber() - << " to " << DestOffset - << " offset " << DestOffset - BrOffset - << '\t' << MI - ); + DEBUG(dbgs() << "Out of range branch to destination " + << printMBBReference(DestBB) << " from " + << printMBBReference(*MI.getParent()) << " to " << DestOffset + << " offset " << DestOffset - BrOffset << '\t' << MI); return false; } @@ -366,9 +363,9 @@ // just created), so we can invert the condition. MachineBasicBlock &NextBB = *std::next(MachineFunction::iterator(MBB)); - DEBUG(dbgs() << " Insert B to BB#" << TBB->getNumber() - << ", invert condition and change dest. to BB#" - << NextBB.getNumber() << '\n'); + DEBUG(dbgs() << " Insert B to " << printMBBReference(*TBB) + << ", invert condition and change dest. to " + << printMBBReference(NextBB) << '\n'); unsigned &MBBSize = BlockInfo[MBB->getNumber()].Size; Index: llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp =================================================================== --- llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp +++ llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp @@ -185,7 +185,7 @@ // Reject any live-in physregs. It's probably CPSR/EFLAGS, and very hard to // get right. if (!MBB->livein_empty()) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n"); return false; } @@ -199,7 +199,7 @@ continue; if (++InstrCount > BlockInstrLimit && !Stress) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than " + DEBUG(dbgs() << printMBBReference(*MBB) << " has more than " << BlockInstrLimit << " instructions.\n"); return false; } @@ -246,7 +246,7 @@ if (!DefMI || DefMI->getParent() != Head) continue; if (InsertAfter.insert(DefMI).second) - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " depends on " << *DefMI); + DEBUG(dbgs() << printMBBReference(*MBB) << " depends on " << *DefMI); if (DefMI->isTerminator()) { DEBUG(dbgs() << "Can't insert instructions below terminator.\n"); return false; @@ -361,10 +361,10 @@ if (Succ1->pred_size() != 1 || Succ1->succ_size() != 1 || Succ1->succ_begin()[0] != Tail) return false; - DEBUG(dbgs() << "\nDiamond: BB#" << Head->getNumber() - << " -> BB#" << Succ0->getNumber() - << "/BB#" << Succ1->getNumber() - << " -> BB#" << Tail->getNumber() << '\n'); + DEBUG(dbgs() << "\nDiamond: " << printMBBReference(*Head) << " -> " + << printMBBReference(*Succ0) << "/" + << printMBBReference(*Succ1) << " -> " + << printMBBReference(*Tail) << '\n'); // Live-in physregs are tricky to get right when speculating code. 
if (!Tail->livein_empty()) { @@ -372,9 +372,9 @@ return false; } } else { - DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber() - << " -> BB#" << Succ0->getNumber() - << " -> BB#" << Tail->getNumber() << '\n'); + DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> " + << printMBBReference(*Succ0) << " -> " + << printMBBReference(*Tail) << '\n'); } // This is a triangle or a diamond. @@ -563,8 +563,8 @@ assert(Head->succ_empty() && "Additional head successors?"); if (!ExtraPreds && Head->isLayoutSuccessor(Tail)) { // Splice Tail onto the end of Head. - DEBUG(dbgs() << "Joining tail BB#" << Tail->getNumber() - << " into head BB#" << Head->getNumber() << '\n'); + DEBUG(dbgs() << "Joining tail " << printMBBReference(*Tail) << " into head " + << printMBBReference(*Head) << '\n'); Head->splice(Head->end(), Tail, Tail->begin(), Tail->end()); Head->transferSuccessorsAndUpdatePHIs(Tail); Index: llvm/trunk/lib/CodeGen/EdgeBundles.cpp =================================================================== --- llvm/trunk/lib/CodeGen/EdgeBundles.cpp +++ llvm/trunk/lib/CodeGen/EdgeBundles.cpp @@ -80,13 +80,15 @@ O << "digraph {\n"; for (const auto &MBB : *MF) { unsigned BB = MBB.getNumber(); - O << "\t\"BB#" << BB << "\" [ shape=box ]\n" - << '\t' << G.getBundle(BB, false) << " -> \"BB#" << BB << "\"\n" - << "\t\"BB#" << BB << "\" -> " << G.getBundle(BB, true) << '\n'; + O << "\t\"" << printMBBReference(MBB) << "\" [ shape=box ]\n" + << '\t' << G.getBundle(BB, false) << " -> \"" << printMBBReference(MBB) + << "\"\n" + << "\t\"" << printMBBReference(MBB) << "\" -> " << G.getBundle(BB, true) + << '\n'; for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI) - O << "\t\"BB#" << BB << "\" -> \"BB#" << (*SI)->getNumber() - << "\" [ color=lightgray ]\n"; + O << "\t\"" << printMBBReference(MBB) << "\" -> \"" + << printMBBReference(**SI) << "\" [ color=lightgray ]\n"; } O << "}\n"; return O; Index: llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp =================================================================== --- llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp +++ llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp @@ -200,7 +200,7 @@ LiveRegs[rx].Def = -1; } } - DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": entry\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n"); return; } @@ -246,7 +246,7 @@ } } DEBUG( - dbgs() << "BB#" << MBB->getNumber() + dbgs() << printMBBReference(*MBB) << (!isBlockDone(MBB) ? ": incomplete\n" : ": all preds known\n")); } Index: llvm/trunk/lib/CodeGen/IfConversion.cpp =================================================================== --- llvm/trunk/lib/CodeGen/IfConversion.cpp +++ llvm/trunk/lib/CodeGen/IfConversion.cpp @@ -406,12 +406,12 @@ case ICSimpleFalse: { bool isFalse = Kind == ICSimpleFalse; if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break; - DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ? - " false" : "") - << "): BB#" << BBI.BB->getNumber() << " (" - << ((Kind == ICSimpleFalse) - ? BBI.FalseBB->getNumber() - : BBI.TrueBB->getNumber()) << ") "); + DEBUG(dbgs() << "Ifcvt (Simple" + << (Kind == ICSimpleFalse ? " false" : "") + << "): " << printMBBReference(*BBI.BB) << " (" + << ((Kind == ICSimpleFalse) ? BBI.FalseBB->getNumber() + : BBI.TrueBB->getNumber()) + << ") "); RetVal = IfConvertSimple(BBI, Kind); DEBUG(dbgs() << (RetVal ? "succeeded!" 
: "failed!") << "\n"); if (RetVal) { @@ -435,9 +435,9 @@ DEBUG(dbgs() << " false"); if (isRev) DEBUG(dbgs() << " rev"); - DEBUG(dbgs() << "): BB#" << BBI.BB->getNumber() << " (T:" - << BBI.TrueBB->getNumber() << ",F:" - << BBI.FalseBB->getNumber() << ") "); + DEBUG(dbgs() << "): " << printMBBReference(*BBI.BB) + << " (T:" << BBI.TrueBB->getNumber() + << ",F:" << BBI.FalseBB->getNumber() << ") "); RetVal = IfConvertTriangle(BBI, Kind); DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n"); if (RetVal) { @@ -453,9 +453,9 @@ } case ICDiamond: if (DisableDiamond) break; - DEBUG(dbgs() << "Ifcvt (Diamond): BB#" << BBI.BB->getNumber() << " (T:" - << BBI.TrueBB->getNumber() << ",F:" - << BBI.FalseBB->getNumber() << ") "); + DEBUG(dbgs() << "Ifcvt (Diamond): " << printMBBReference(*BBI.BB) + << " (T:" << BBI.TrueBB->getNumber() + << ",F:" << BBI.FalseBB->getNumber() << ") "); RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2, Token->TClobbersPred, Token->FClobbersPred); @@ -464,10 +464,9 @@ break; case ICForkedDiamond: if (DisableForkedDiamond) break; - DEBUG(dbgs() << "Ifcvt (Forked Diamond): BB#" - << BBI.BB->getNumber() << " (T:" - << BBI.TrueBB->getNumber() << ",F:" - << BBI.FalseBB->getNumber() << ") "); + DEBUG(dbgs() << "Ifcvt (Forked Diamond): " << printMBBReference(*BBI.BB) + << " (T:" << BBI.TrueBB->getNumber() + << ",F:" << BBI.FalseBB->getNumber() << ") "); RetVal = IfConvertForkedDiamond(BBI, Kind, NumDups, NumDups2, Token->TClobbersPred, Token->FClobbersPred); Index: llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp =================================================================== --- llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp +++ llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp @@ -1174,7 +1174,7 @@ MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start)->getIterator(); SlotIndex MBBEnd = LIS.getMBBEndIdx(&*MBB); - DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd); + DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd); insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI); // This interval may span multiple basic blocks. // Insert a DBG_VALUE into each one. @@ -1184,7 +1184,7 @@ if (++MBB == MFEnd) break; MBBEnd = LIS.getMBBEndIdx(&*MBB); - DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd); + DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd); insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI); } DEBUG(dbgs() << '\n'); Index: llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp =================================================================== --- llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp +++ llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -323,7 +323,7 @@ // Create phi-defs at Begin for all live-in registers. 
   SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
-  DEBUG(dbgs() << Begin << "\tBB#" << MBB.getNumber());
+  DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
   for (const auto &LI : MBB.liveins()) {
     for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
       unsigned Unit = *Units;
Index: llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp
+++ llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp
@@ -377,7 +377,7 @@
     MBB->getParent()->verify();
     const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
     errs() << "The register " << printReg(PhysReg, TRI)
-           << " needs to be live in to BB#" << MBB->getNumber()
+           << " needs to be live in to " << printMBBReference(*MBB)
            << ", but is missing from the live-in list.\n";
     report_fatal_error("Invalid global physical register");
   }
Index: llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp
+++ llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp
@@ -277,6 +277,9 @@
   C.advance();
   StringRef Number = NumberRange.upto(C);
   unsigned StringOffset = PrefixLength + Number.size(); // Drop '%bb.'
+  // TODO: The format bb.<id>.<irname> is supported only when it's not a
+  // reference. Once we deprecate the format where the irname shows up, we
+  // should only lex forward if it is a reference.
   if (C.peek() == '.') {
     C.advance(); // Skip '.'
     ++StringOffset;
Index: llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
+++ llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1344,6 +1344,8 @@
     return error(Twine("use of undefined machine basic block #") +
                  Twine(Number));
   MBB = MBBInfo->second;
+  // TODO: Only parse the name if it's a MachineBasicBlockLabel. Deprecate once
+  // we drop the <irname> from the bb.<id>.<irname> format.
if (!Token.stringValue().empty() && Token.stringValue() != MBB->getName()) return error(Twine("the name of machine basic block #") + Twine(Number) + " isn't '" + Token.stringValue() + "'"); Index: llvm/trunk/lib/CodeGen/MIRPrinter.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MIRPrinter.cpp +++ llvm/trunk/lib/CodeGen/MIRPrinter.cpp @@ -157,7 +157,6 @@ void print(const MachineBasicBlock &MBB); void print(const MachineInstr &MI); - void printMBBReference(const MachineBasicBlock &MBB); void printIRBlockReference(const BasicBlock &BB); void printIRValueReference(const Value &V); void printStackObjectReference(int FrameIndex); @@ -338,13 +337,11 @@ YamlMFI.HasMustTailInVarArgFunc = MFI.hasMustTailInVarArgFunc(); if (MFI.getSavePoint()) { raw_string_ostream StrOS(YamlMFI.SavePoint.Value); - MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping) - .printMBBReference(*MFI.getSavePoint()); + StrOS << printMBBReference(*MFI.getSavePoint()); } if (MFI.getRestorePoint()) { raw_string_ostream StrOS(YamlMFI.RestorePoint.Value); - MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping) - .printMBBReference(*MFI.getRestorePoint()); + StrOS << printMBBReference(*MFI.getRestorePoint()); } } @@ -493,8 +490,7 @@ Entry.ID = ID++; for (const auto *MBB : Table.MBBs) { raw_string_ostream StrOS(Str); - MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping) - .printMBBReference(*MBB); + StrOS << printMBBReference(*MBB); Entry.Blocks.push_back(StrOS.str()); Str.clear(); } @@ -616,7 +612,7 @@ for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) { if (I != MBB.succ_begin()) OS << ", "; - printMBBReference(**I); + OS << printMBBReference(**I); if (!SimplifyMIR || !canPredictProbs) OS << '(' << format("0x%08" PRIx32, MBB.getSuccProbability(I).getNumerator()) @@ -764,14 +760,6 @@ } } -void MIPrinter::printMBBReference(const MachineBasicBlock &MBB) { - OS << "%bb." << MBB.getNumber(); - if (const auto *BB = MBB.getBasicBlock()) { - if (BB->hasName()) - OS << '.' << BB->getName(); - } -} - static void printIRSlotNumber(raw_ostream &OS, int Slot) { if (Slot == -1) OS << ""; @@ -967,7 +955,7 @@ Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST); break; case MachineOperand::MO_MachineBasicBlock: - printMBBReference(*Op.getMBB()); + OS << printMBBReference(*Op.getMBB()); break; case MachineOperand::MO_FrameIndex: printStackObjectReference(Op.getIndex()); Index: llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp +++ llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp @@ -70,6 +70,10 @@ return OS; } +Printable llvm::printMBBReference(const MachineBasicBlock &MBB) { + return Printable([&MBB](raw_ostream &OS) { return MBB.printAsOperand(OS); }); +} + /// When an MBB is added to an MF, we need to update the parent pointer of the /// MBB, the MBB numbering, and any instructions in the MBB to be on the right /// operand list for registers. 
@@ -281,7 +285,7 @@ if (Indexes) OS << Indexes->getMBBStartIdx(this) << '\t'; - OS << "BB#" << getNumber() << ": "; + OS << printMBBReference(*this) << ": "; const char *Comma = ""; if (const BasicBlock *LBB = getBasicBlock()) { @@ -313,7 +317,7 @@ if (Indexes) OS << '\t'; OS << " Predecessors according to CFG:"; for (const_pred_iterator PI = pred_begin(), E = pred_end(); PI != E; ++PI) - OS << " BB#" << (*PI)->getNumber(); + OS << " " << printMBBReference(*(*PI)); OS << '\n'; } @@ -334,7 +338,7 @@ if (Indexes) OS << '\t'; OS << " Successors according to CFG:"; for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI) { - OS << " BB#" << (*SI)->getNumber(); + OS << " " << printMBBReference(*(*SI)); if (!Probs.empty()) OS << '(' << *getProbabilityIterator(SI) << ')'; } @@ -350,7 +354,7 @@ void MachineBasicBlock::printAsOperand(raw_ostream &OS, bool /*PrintType*/) const { - OS << "BB#" << getNumber(); + OS << "%bb." << getNumber(); } void MachineBasicBlock::removeLiveIn(MCPhysReg Reg, LaneBitmask LaneMask) { @@ -767,10 +771,9 @@ MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock(); MF->insert(std::next(MachineFunction::iterator(this)), NMBB); - DEBUG(dbgs() << "Splitting critical edge:" - " BB#" << getNumber() - << " -- BB#" << NMBB->getNumber() - << " -- BB#" << Succ->getNumber() << '\n'); + DEBUG(dbgs() << "Splitting critical edge: " << printMBBReference(*this) + << " -- " << printMBBReference(*NMBB) << " -- " + << printMBBReference(*Succ) << '\n'); LiveIntervals *LIS = P.getAnalysisIfAvailable(); SlotIndexes *Indexes = P.getAnalysisIfAvailable(); @@ -1023,8 +1026,8 @@ // case that we can't handle. Since this never happens in properly optimized // code, just skip those edges. if (TBB && TBB == FBB) { - DEBUG(dbgs() << "Won't split critical edge after degenerate BB#" - << getNumber() << '\n'); + DEBUG(dbgs() << "Won't split critical edge after degenerate " + << printMBBReference(*this) << '\n'); return false; } return true; Index: llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp +++ llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp @@ -546,7 +546,7 @@ static std::string getBlockName(const MachineBasicBlock *BB) { std::string Result; raw_string_ostream OS(Result); - OS << "BB#" << BB->getNumber(); + OS << printMBBReference(*BB); OS << " ('" << BB->getName() << "')"; OS.flush(); return Result; Index: llvm/trunk/lib/CodeGen/MachineBranchProbabilityInfo.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineBranchProbabilityInfo.cpp +++ llvm/trunk/lib/CodeGen/MachineBranchProbabilityInfo.cpp @@ -84,7 +84,7 @@ const MachineBasicBlock *Dst) const { const BranchProbability Prob = getEdgeProbability(Src, Dst); - OS << "edge MBB#" << Src->getNumber() << " -> MBB#" << Dst->getNumber() + OS << "edge " << printMBBReference(*Src) << " -> " << printMBBReference(*Dst) << " probability is " << Prob << (isEdgeHot(Src, Dst) ? 
" [HOT edge]\n" : "\n"); Index: llvm/trunk/lib/CodeGen/MachineFunction.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineFunction.cpp +++ llvm/trunk/lib/CodeGen/MachineFunction.cpp @@ -546,7 +546,7 @@ raw_string_ostream OSS(OutStr); if (isSimple()) { - OSS << "BB#" << Node->getNumber(); + OSS << printMBBReference(*Node); if (const BasicBlock *BB = Node->getBasicBlock()) OSS << ": " << BB->getName(); } else @@ -908,7 +908,7 @@ for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) { OS << " jt#" << i << ": "; for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j) - OS << " BB#" << JumpTables[i].MBBs[j]->getNumber(); + OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]); } OS << '\n'; Index: llvm/trunk/lib/CodeGen/MachineLICM.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineLICM.cpp +++ llvm/trunk/lib/CodeGen/MachineLICM.cpp @@ -563,8 +563,8 @@ // Now move the instructions to the predecessor, inserting it before any // terminator instructions. - DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#" - << MI->getParent()->getNumber() << ": " << *MI); + DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader) << " from " + << printMBBReference(*MI->getParent()) << ": " << *MI); // Splice the instruction to the preheader. MachineBasicBlock *MBB = MI->getParent(); @@ -601,14 +601,14 @@ } void MachineLICM::EnterScope(MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Entering BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n'); // Remember livein register pressure. BackTrace.push_back(RegPressure); } void MachineLICM::ExitScope(MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Exiting BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Exiting " << printMBBReference(*MBB) << '\n'); BackTrace.pop_back(); } @@ -1336,9 +1336,9 @@ DEBUG({ dbgs() << "Hoisting " << *MI; if (MI->getParent()->getBasicBlock()) - dbgs() << " from BB#" << MI->getParent()->getNumber(); + dbgs() << " from " << printMBBReference(*MI->getParent()); if (Preheader->getBasicBlock()) - dbgs() << " to BB#" << Preheader->getNumber(); + dbgs() << " to " << printMBBReference(*Preheader); dbgs() << "\n"; }); Index: llvm/trunk/lib/CodeGen/MachineOperand.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineOperand.cpp +++ llvm/trunk/lib/CodeGen/MachineOperand.cpp @@ -428,7 +428,7 @@ } break; case MachineOperand::MO_MachineBasicBlock: - OS << "getNumber() << ">"; + OS << printMBBReference(*getMBB()); break; case MachineOperand::MO_FrameIndex: OS << "'; Index: llvm/trunk/lib/CodeGen/MachineScheduler.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineScheduler.cpp +++ llvm/trunk/lib/CodeGen/MachineScheduler.cpp @@ -98,7 +98,7 @@ static cl::opt SchedOnlyFunc("misched-only-func", cl::Hidden, cl::desc("Only schedule this function")); static cl::opt SchedOnlyBlock("misched-only-block", cl::Hidden, - cl::desc("Only schedule this MBB#")); + cl::desc("Only schedule this MBB#")); #else static bool ViewMISchedDAGs = false; #endif // NDEBUG @@ -548,15 +548,14 @@ continue; } DEBUG(dbgs() << "********** MI Scheduling **********\n"); - DEBUG(dbgs() << MF->getName() - << ":BB#" << MBB->getNumber() << " " << MBB->getName() - << "\n From: " << *I << " To: "; + DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB) << " " + << MBB->getName() << "\n 
From: " << *I << " To: "; if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'); if (DumpCriticalPathLength) { errs() << MF->getName(); - errs() << ":BB# " << MBB->getNumber(); + errs() << ":%bb. " << MBB->getNumber(); errs() << " " << MBB->getName() << " \n"; } @@ -823,11 +822,11 @@ placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; - dumpSchedule(); - dbgs() << '\n'; - }); + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; + dumpSchedule(); + dbgs() << '\n'; + }); } /// Apply each ScheduleDAGMutation step in order. @@ -1261,11 +1260,11 @@ placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; - dumpSchedule(); - dbgs() << '\n'; - }); + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; + dumpSchedule(); + dbgs() << '\n'; + }); } /// Build the DAG and setup three register pressure trackers. Index: llvm/trunk/lib/CodeGen/MachineSink.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineSink.cpp +++ llvm/trunk/lib/CodeGen/MachineSink.cpp @@ -243,17 +243,17 @@ // into and they are all PHI nodes. In this case, machine-sink must break // the critical edge first. e.g. // - // BB#1: derived from LLVM BB %bb4.preheader - // Predecessors according to CFG: BB#0 + // %bb.1: derived from LLVM BB %bb4.preheader + // Predecessors according to CFG: %bb.0 // ... // %reg16385 = DEC64_32r %reg16437, %eflags // ... - // JE_4 , %eflags - // Successors according to CFG: BB#37 BB#2 + // JE_4 <%bb.37>, %eflags + // Successors according to CFG: %bb.37 %bb.2 // - // BB#2: derived from LLVM BB %bb.nph - // Predecessors according to CFG: BB#0 BB#1 - // %reg16386 = PHI %reg16434, , %reg16385, + // %bb.2: derived from LLVM BB %bb.nph + // Predecessors according to CFG: %bb.0 %bb.1 + // %reg16386 = PHI %reg16434, %bb.0, %reg16385, %bb.1 BreakPHIEdge = true; for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) { MachineInstr *UseInst = MO.getParent(); @@ -321,10 +321,10 @@ for (auto &Pair : ToSplit) { auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this); if (NewSucc != nullptr) { - DEBUG(dbgs() << " *** Splitting critical edge:" - " BB#" << Pair.first->getNumber() - << " -- BB#" << NewSucc->getNumber() - << " -- BB#" << Pair.second->getNumber() << '\n'); + DEBUG(dbgs() << " *** Splitting critical edge: " + << printMBBReference(*Pair.first) << " -- " + << printMBBReference(*NewSucc) << " -- " + << printMBBReference(*Pair.second) << '\n'); MadeChange = true; ++NumSplit; } else @@ -460,33 +460,33 @@ // It's not always legal to break critical edges and sink the computation // to the edge. // - // BB#1: + // %bb.1: // v1024 - // Beq BB#3 + // Beq %bb.3 // - // BB#2: + // %bb.2: // ... no uses of v1024 // - // BB#3: + // %bb.3: // ... // = v1024 // - // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted: + // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted: // - // BB#1: + // %bb.1: // ... - // Bne BB#2 - // BB#4: + // Bne %bb.2 + // %bb.4: // v1024 = - // B BB#3 - // BB#2: + // B %bb.3 + // %bb.2: // ... no uses of v1024 // - // BB#3: + // %bb.3: // ... 
// = v1024 // - // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3 + // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3 // flow. We need to ensure the new basic block where the computation is // sunk to dominates all the uses. // It's only legal to break critical edge and sink the computation to the Index: llvm/trunk/lib/CodeGen/MachineTraceMetrics.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineTraceMetrics.cpp +++ llvm/trunk/lib/CodeGen/MachineTraceMetrics.cpp @@ -396,7 +396,8 @@ } void MachineTraceMetrics::invalidate(const MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Invalidate traces through BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Invalidate traces through " << printMBBReference(*MBB) + << '\n'); BlockInfo[MBB->getNumber()].invalidate(); for (unsigned i = 0; i != TS_NumStrategies; ++i) if (Ensembles[i]) @@ -476,8 +477,8 @@ /// Compute the trace through MBB. void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Computing " << getName() << " trace through BB#" - << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Computing " << getName() << " trace through " + << printMBBReference(*MBB) << '\n'); // Set up loop bounds for the backwards post-order traversal. LoopBounds Bounds(BlockInfo, MTM.Loops); @@ -485,13 +486,13 @@ Bounds.Downward = false; Bounds.Visited.clear(); for (auto I : inverse_post_order_ext(MBB, Bounds)) { - DEBUG(dbgs() << " pred for BB#" << I->getNumber() << ": "); + DEBUG(dbgs() << " pred for " << printMBBReference(*I) << ": "); TraceBlockInfo &TBI = BlockInfo[I->getNumber()]; // All the predecessors have been visited, pick the preferred one. TBI.Pred = pickTracePred(I); DEBUG({ if (TBI.Pred) - dbgs() << "BB#" << TBI.Pred->getNumber() << '\n'; + dbgs() << printMBBReference(*TBI.Pred) << '\n'; else dbgs() << "null\n"; }); @@ -503,13 +504,13 @@ Bounds.Downward = true; Bounds.Visited.clear(); for (auto I : post_order_ext(MBB, Bounds)) { - DEBUG(dbgs() << " succ for BB#" << I->getNumber() << ": "); + DEBUG(dbgs() << " succ for " << printMBBReference(*I) << ": "); TraceBlockInfo &TBI = BlockInfo[I->getNumber()]; // All the successors have been visited, pick the preferred one. TBI.Succ = pickTraceSucc(I); DEBUG({ if (TBI.Succ) - dbgs() << "BB#" << TBI.Succ->getNumber() << '\n'; + dbgs() << printMBBReference(*TBI.Succ) << '\n'; else dbgs() << "null\n"; }); @@ -530,8 +531,8 @@ WorkList.push_back(BadMBB); do { const MachineBasicBlock *MBB = WorkList.pop_back_val(); - DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName() - << " height.\n"); + DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' ' + << getName() << " height.\n"); // Find any MBB predecessors that have MBB as their preferred successor. // They are the only ones that need to be invalidated. for (const MachineBasicBlock *Pred : MBB->predecessors()) { @@ -555,8 +556,8 @@ WorkList.push_back(BadMBB); do { const MachineBasicBlock *MBB = WorkList.pop_back_val(); - DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName() - << " depth.\n"); + DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' ' + << getName() << " depth.\n"); // Find any MBB successors that have MBB as their preferred predecessor. // They are the only ones that need to be invalidated. for (const MachineBasicBlock *Succ : MBB->successors()) { @@ -859,7 +860,7 @@ // Go through trace blocks in top-down order, stopping after the center block. 
while (!Stack.empty()) { MBB = Stack.pop_back_val(); - DEBUG(dbgs() << "\nDepths for BB#" << MBB->getNumber() << ":\n"); + DEBUG(dbgs() << "\nDepths for " << printMBBReference(*MBB) << ":\n"); TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()]; TBI.HasValidInstrDepths = true; TBI.CriticalPath = 0; @@ -1044,7 +1045,7 @@ SmallVector Deps; for (;!Stack.empty(); Stack.pop_back()) { MBB = Stack.back(); - DEBUG(dbgs() << "Heights for BB#" << MBB->getNumber() << ":\n"); + DEBUG(dbgs() << "Heights for " << printMBBReference(*MBB) << ":\n"); TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()]; TBI.HasValidInstrHeights = true; TBI.CriticalPath = 0; @@ -1131,7 +1132,7 @@ // Update virtual live-in heights. They were added by addLiveIns() with a 0 // height because the final height isn't known until now. - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " Live-ins:"); + DEBUG(dbgs() << printMBBReference(*MBB) << " Live-ins:"); for (LiveInReg &LIR : TBI.LiveIns) { const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg); LIR.Height = Heights.lookup(DefMI); @@ -1289,7 +1290,7 @@ void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const { OS << getName() << " ensemble:\n"; for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) { - OS << " BB#" << i << '\t'; + OS << " %bb." << i << '\t'; BlockInfo[i].print(OS); OS << '\n'; } @@ -1299,10 +1300,10 @@ if (hasValidDepth()) { OS << "depth=" << InstrDepth; if (Pred) - OS << " pred=BB#" << Pred->getNumber(); + OS << " pred=" << printMBBReference(*Pred); else OS << " pred=null"; - OS << " head=BB#" << Head; + OS << " head=%bb." << Head; if (HasValidInstrDepths) OS << " +instrs"; } else @@ -1311,10 +1312,10 @@ if (hasValidHeight()) { OS << "height=" << InstrHeight; if (Succ) - OS << " succ=BB#" << Succ->getNumber(); + OS << " succ=" << printMBBReference(*Succ); else OS << " succ=null"; - OS << " tail=BB#" << Tail; + OS << " tail=%bb." << Tail; if (HasValidInstrHeights) OS << " +instrs"; } else @@ -1326,18 +1327,18 @@ void MachineTraceMetrics::Trace::print(raw_ostream &OS) const { unsigned MBBNum = &TBI - &TE.BlockInfo[0]; - OS << TE.getName() << " trace BB#" << TBI.Head << " --> BB#" << MBBNum - << " --> BB#" << TBI.Tail << ':'; + OS << TE.getName() << " trace %bb." << TBI.Head << " --> %bb." << MBBNum + << " --> %bb." << TBI.Tail << ':'; if (TBI.hasValidHeight() && TBI.hasValidDepth()) OS << ' ' << getInstrCount() << " instrs."; if (TBI.HasValidInstrDepths && TBI.HasValidInstrHeights) OS << ' ' << TBI.CriticalPath << " cycles."; const MachineTraceMetrics::TraceBlockInfo *Block = &TBI; - OS << "\nBB#" << MBBNum; + OS << "\n%bb." 
<< MBBNum; while (Block->hasValidDepth() && Block->Pred) { unsigned Num = Block->Pred->getNumber(); - OS << " <- BB#" << Num; + OS << " <- " << printMBBReference(*Block->Pred); Block = &TE.BlockInfo[Num]; } @@ -1345,7 +1346,7 @@ OS << "\n "; while (Block->hasValidHeight() && Block->Succ) { unsigned Num = Block->Succ->getNumber(); - OS << " -> BB#" << Num; + OS << " -> " << printMBBReference(*Block->Succ); Block = &TE.BlockInfo[Num]; } OS << '\n'; Index: llvm/trunk/lib/CodeGen/MachineVerifier.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineVerifier.cpp +++ llvm/trunk/lib/CodeGen/MachineVerifier.cpp @@ -471,9 +471,8 @@ void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) { assert(MBB); report(msg, MBB->getParent()); - errs() << "- basic block: BB#" << MBB->getNumber() - << ' ' << MBB->getName() - << " (" << (const void*)MBB << ')'; + errs() << "- basic block: " << printMBBReference(*MBB) << ' ' + << MBB->getName() << " (" << (const void *)MBB << ')'; if (Indexes) errs() << " [" << Indexes->getMBBStartIdx(MBB) << ';' << Indexes->getMBBEndIdx(MBB) << ')'; @@ -619,8 +618,8 @@ report("MBB has successor that isn't part of the function.", MBB); if (!MBBInfoMap[*I].Preds.count(MBB)) { report("Inconsistent CFG", MBB); - errs() << "MBB is not in the predecessor list of the successor BB#" - << (*I)->getNumber() << ".\n"; + errs() << "MBB is not in the predecessor list of the successor " + << printMBBReference(*(*I)) << ".\n"; } } @@ -631,8 +630,8 @@ report("MBB has predecessor that isn't part of the function.", MBB); if (!MBBInfoMap[*I].Succs.count(MBB)) { report("Inconsistent CFG", MBB); - errs() << "MBB is not in the successor list of the predecessor BB#" - << (*I)->getNumber() << ".\n"; + errs() << "MBB is not in the successor list of the predecessor " + << printMBBReference(*(*I)) << ".\n"; } } @@ -1663,8 +1662,8 @@ for (MachineBasicBlock *Pred : MBB.predecessors()) { if (!seen.count(Pred)) { report("Missing PHI operand", &Phi); - errs() << "BB#" << Pred->getNumber() - << " is a predecessor according to the CFG.\n"; + errs() << printMBBReference(*Pred) + << " is a predecessor according to the CFG.\n"; } } } @@ -2038,8 +2037,8 @@ report("Register not marked live out of predecessor", *PI); report_context(LR, Reg, LaneMask); report_context(*VNI); - errs() << " live into BB#" << MFI->getNumber() - << '@' << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " + errs() << " live into " << printMBBReference(*MFI) << '@' + << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd << '\n'; continue; } @@ -2048,9 +2047,9 @@ if (!IsPHI && PVNI != VNI) { report("Different value live out of predecessor", *PI); report_context(LR, Reg, LaneMask); - errs() << "Valno #" << PVNI->id << " live out of BB#" - << (*PI)->getNumber() << '@' << PEnd << "\nValno #" << VNI->id - << " live into BB#" << MFI->getNumber() << '@' + errs() << "Valno #" << PVNI->id << " live out of " + << printMBBReference(*(*PI)) << '@' << PEnd << "\nValno #" + << VNI->id << " live into " << printMBBReference(*MFI) << '@' << LiveInts->getMBBStartIdx(&*MFI) << '\n'; } } @@ -2201,11 +2200,11 @@ (SPState[(*I)->getNumber()].ExitValue != BBState.EntryValue || SPState[(*I)->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) { report("The exit stack state of a predecessor is inconsistent.", MBB); - errs() << "Predecessor BB#" << (*I)->getNumber() << " has exit state (" - << SPState[(*I)->getNumber()].ExitValue << ", " - << SPState[(*I)->getNumber()].ExitIsSetup 
- << "), while BB#" << MBB->getNumber() << " has entry state (" - << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n"; + errs() << "Predecessor " << printMBBReference(*(*I)) + << " has exit state (" << SPState[(*I)->getNumber()].ExitValue + << ", " << SPState[(*I)->getNumber()].ExitIsSetup << "), while " + << printMBBReference(*MBB) << " has entry state (" + << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n"; } } @@ -2217,11 +2216,11 @@ (SPState[(*I)->getNumber()].EntryValue != BBState.ExitValue || SPState[(*I)->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) { report("The entry stack state of a successor is inconsistent.", MBB); - errs() << "Successor BB#" << (*I)->getNumber() << " has entry state (" - << SPState[(*I)->getNumber()].EntryValue << ", " - << SPState[(*I)->getNumber()].EntryIsSetup - << "), while BB#" << MBB->getNumber() << " has exit state (" - << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n"; + errs() << "Successor " << printMBBReference(*(*I)) + << " has entry state (" << SPState[(*I)->getNumber()].EntryValue + << ", " << SPState[(*I)->getNumber()].EntryIsSetup << "), while " + << printMBBReference(*MBB) << " has exit state (" + << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n"; } } Index: llvm/trunk/lib/CodeGen/PHIElimination.cpp =================================================================== --- llvm/trunk/lib/CodeGen/PHIElimination.cpp +++ llvm/trunk/lib/CodeGen/PHIElimination.cpp @@ -593,9 +593,9 @@ if (!ShouldSplit && !NoPhiElimLiveOutEarlyExit) continue; if (ShouldSplit) { - DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge BB#" - << PreMBB->getNumber() << " -> BB#" << MBB.getNumber() - << ": " << *BBI); + DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge " + << printMBBReference(*PreMBB) << " -> " + << printMBBReference(MBB) << ": " << *BBI); } // If Reg is not live-in to MBB, it means it must be live-in to some Index: llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp =================================================================== --- llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp +++ llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp @@ -322,8 +322,8 @@ static int bbcnt = 0; if (bbcnt++ % DebugDiv != DebugMod) continue; - dbgs() << "*** DEBUG scheduling " << Fn.getName() - << ":BB#" << MBB.getNumber() << " ***\n"; + dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":" + << printMBBReference(MBB) << " ***\n"; } #endif Index: llvm/trunk/lib/CodeGen/ProcessImplicitDefs.cpp =================================================================== --- llvm/trunk/lib/CodeGen/ProcessImplicitDefs.cpp +++ llvm/trunk/lib/CodeGen/ProcessImplicitDefs.cpp @@ -154,7 +154,7 @@ if (WorkList.empty()) continue; - DEBUG(dbgs() << "BB#" << MFI->getNumber() << " has " << WorkList.size() + DEBUG(dbgs() << printMBBReference(*MFI) << " has " << WorkList.size() << " implicit defs.\n"); Changed = true; Index: llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp =================================================================== --- llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp +++ llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp @@ -1612,7 +1612,7 @@ // Create separate intervals for isolated blocks with multiple uses. 
if (!IntvIn && !IntvOut) { - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n"); + DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n"); if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) SE->splitSingleBlock(BI); continue; Index: llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp =================================================================== --- llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp +++ llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp @@ -991,8 +991,8 @@ // Now ok to move copy. if (CopyLeftBB) { - DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to BB#" - << CopyLeftBB->getNumber() << '\t' << CopyMI); + DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to " + << printMBBReference(*CopyLeftBB) << '\t' << CopyMI); // Insert new copy to CopyLeftBB. auto InsPos = CopyLeftBB->getFirstTerminator(); @@ -1010,8 +1010,8 @@ // the deleted list. ErasedInstrs.erase(NewCopyMI); } else { - DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from BB#" - << MBB.getNumber() << '\t' << CopyMI); + DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from " + << printMBBReference(MBB) << '\t' << CopyMI); } // Remove CopyMI. @@ -2376,7 +2376,7 @@ if (OtherV.ErasableImplicitDef && DefMI && DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) { DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def - << " extends into BB#" << DefMI->getParent()->getNumber() + << " extends into " << printMBBReference(*DefMI->getParent()) << ", keeping it.\n"); OtherV.ErasableImplicitDef = false; } Index: llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp =================================================================== --- llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp +++ llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -1043,7 +1043,7 @@ } void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) { - DEBUG(dbgs() << "Fixup kills for BB#" << MBB.getNumber() << '\n'); + DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n'); LiveRegs.init(*TRI); LiveRegs.addLiveOuts(MBB); Index: llvm/trunk/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -346,9 +346,8 @@ /// Schedule - Schedule the DAG using list scheduling. void ScheduleDAGRRList::Schedule() { - DEBUG(dbgs() - << "********** List Scheduling BB#" << BB->getNumber() - << " '" << BB->getName() << "' **********\n"); + DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB) + << " '" << BB->getName() << "' **********\n"); CurCycle = 0; IssueCount = 0; Index: llvm/trunk/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp @@ -93,9 +93,8 @@ /// Schedule - Schedule the DAG using list scheduling. void ScheduleDAGVLIW::Schedule() { - DEBUG(dbgs() - << "********** List Scheduling BB#" << BB->getNumber() - << " '" << BB->getName() << "' **********\n"); + DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB) + << " '" << BB->getName() << "' **********\n"); // Build the scheduling graph. 
BuildSchedGraph(AA); Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -730,8 +730,9 @@ BlockName = (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str(); } - DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Initial selection DAG: " << printMBBReference(*FuncInfo->MBB) + << " '" << BlockName << "'\n"; + CurDAG->dump()); if (ViewDAGCombine1 && MatchFilterBB) CurDAG->viewGraph("dag-combine1 input for " + BlockName); @@ -743,8 +744,10 @@ CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel); } - DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized lowered selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); // Second step, hack on the DAG until it only uses operations and types that // the target supports. @@ -758,8 +761,10 @@ Changed = CurDAG->LegalizeTypes(); } - DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Type-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); // Only allow creation of legal node types. CurDAG->NewNodesMustHaveLegalTypes = true; @@ -775,8 +780,10 @@ CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel); } - DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized type-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); } { @@ -786,8 +793,10 @@ } if (Changed) { - DEBUG(dbgs() << "Vector-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Vector-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); { NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName, @@ -795,8 +804,10 @@ CurDAG->LegalizeTypes(); } - DEBUG(dbgs() << "Vector/type-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Vector/type-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (ViewDAGCombineLT && MatchFilterBB) CurDAG->viewGraph("dag-combine-lv input for " + BlockName); @@ -808,8 +819,10 @@ CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel); } - DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#" - << BlockNumber << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized vector-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); } if (ViewLegalizeDAGs && MatchFilterBB) @@ -821,8 +834,10 @@ CurDAG->Legalize(); } - DEBUG(dbgs() << "Legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (ViewDAGCombine2 && MatchFilterBB) CurDAG->viewGraph("dag-combine2 input for " + BlockName); @@ -834,8 +849,10 @@ 
CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel); } - DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (OptLevel != CodeGenOpt::None) ComputeLiveOutVRegInfo(); @@ -851,8 +868,10 @@ DoInstructionSelection(); } - DEBUG(dbgs() << "Selected selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Selected selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (ViewSchedDAGs && MatchFilterBB) CurDAG->viewGraph("scheduler input for " + BlockName); @@ -919,9 +938,9 @@ } // end anonymous namespace void SelectionDAGISel::DoInstructionSelection() { - DEBUG(dbgs() << "===== Instruction selection begins: BB#" - << FuncInfo->MBB->getNumber() - << " '" << FuncInfo->MBB->getName() << "'\n"); + DEBUG(dbgs() << "===== Instruction selection begins: " + << printMBBReference(*FuncInfo->MBB) << " '" + << FuncInfo->MBB->getName() << "'\n"); PreprocessISelDAG(); Index: llvm/trunk/lib/CodeGen/SlotIndexes.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SlotIndexes.cpp +++ llvm/trunk/lib/CodeGen/SlotIndexes.cpp @@ -264,7 +264,7 @@ } for (unsigned i = 0, e = MBBRanges.size(); i != e; ++i) - dbgs() << "BB#" << i << "\t[" << MBBRanges[i].first << ';' + dbgs() << "%bb." << i << "\t[" << MBBRanges[i].first << ';' << MBBRanges[i].second << ")\n"; } #endif Index: llvm/trunk/lib/CodeGen/SplitKit.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SplitKit.cpp +++ llvm/trunk/lib/CodeGen/SplitKit.cpp @@ -729,7 +729,8 @@ assert(OpenIdx && "openIntv not called before enterIntvAtEnd"); SlotIndex End = LIS.getMBBEndIdx(&MBB); SlotIndex Last = End.getPrevSlot(); - DEBUG(dbgs() << " enterIntvAtEnd BB#" << MBB.getNumber() << ", " << Last); + DEBUG(dbgs() << " enterIntvAtEnd " << printMBBReference(MBB) << ", " + << Last); VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Last); if (!ParentVNI) { DEBUG(dbgs() << ": not live\n"); @@ -808,7 +809,8 @@ SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) { assert(OpenIdx && "openIntv not called before leaveIntvAtTop"); SlotIndex Start = LIS.getMBBStartIdx(&MBB); - DEBUG(dbgs() << " leaveIntvAtTop BB#" << MBB.getNumber() << ", " << Start); + DEBUG(dbgs() << " leaveIntvAtTop " << printMBBReference(MBB) << ", " + << Start); VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start); if (!ParentVNI) { @@ -906,15 +908,15 @@ // MBB isn't in a loop, it doesn't get any better. All dominators have a // higher frequency by definition. if (!Loop) { - DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#" - << MBB->getNumber() << " at depth 0\n"); + DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates " + << printMBBReference(*MBB) << " at depth 0\n"); return MBB; } // We'll never be able to exit the DefLoop. 
if (Loop == DefLoop) { - DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#" - << MBB->getNumber() << " in the same loop\n"); + DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates " + << printMBBReference(*MBB) << " in the same loop\n"); return MBB; } @@ -923,8 +925,8 @@ if (Depth < BestDepth) { BestMBB = MBB; BestDepth = Depth; - DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#" - << MBB->getNumber() << " at depth " << Depth << '\n'); + DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates " + << printMBBReference(*MBB) << " at depth " << Depth << '\n'); } // Leave loop by going to the immediate dominator of the loop header. @@ -1063,7 +1065,7 @@ DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@' << VNI->def << " for parent " << ParentVNI->id << '@' << ParentVNI->def - << " hoist to BB#" << Dom.first->getNumber() << ' ' + << " hoist to " << printMBBReference(*Dom.first) << ' ' << Dom.second << '\n'); } @@ -1173,7 +1175,7 @@ if (Start != BlockStart) { VNInfo *VNI = LI.extendInBlock(BlockStart, std::min(BlockEnd, End)); assert(VNI && "Missing def for complex mapped value"); - DEBUG(dbgs() << ':' << VNI->id << "*BB#" << MBB->getNumber()); + DEBUG(dbgs() << ':' << VNI->id << "*" << printMBBReference(*MBB)); // MBB has its own def. Is it also live-out? if (BlockEnd <= End) LRC.setLiveOutValue(&*MBB, VNI); @@ -1186,7 +1188,7 @@ // Handle the live-in blocks covered by [Start;End). assert(Start <= BlockStart && "Expected live-in block"); while (BlockStart < End) { - DEBUG(dbgs() << ">BB#" << MBB->getNumber()); + DEBUG(dbgs() << ">" << printMBBReference(*MBB)); BlockEnd = LIS.getMBBEndIdx(&*MBB); if (BlockStart == ParentVNI->def) { // This block has the def of a parent PHI, so it isn't live-in. @@ -1329,7 +1331,7 @@ unsigned RegIdx = RegAssign.lookup(Idx); LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx)); MO.setReg(LI.reg); - DEBUG(dbgs() << " rewr BB#" << MI->getParent()->getNumber() << '\t' + DEBUG(dbgs() << " rewr " << printMBBReference(*MI->getParent()) << '\t' << Idx << ':' << RegIdx << '\t' << *MI); // Extend liveness to Idx if the instruction reads reg. @@ -1563,9 +1565,9 @@ SlotIndex Start, Stop; std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum); - DEBUG(dbgs() << "BB#" << MBBNum << " [" << Start << ';' << Stop - << ") intf " << LeaveBefore << '-' << EnterAfter - << ", live-through " << IntvIn << " -> " << IntvOut); + DEBUG(dbgs() << "%bb." << MBBNum << " [" << Start << ';' << Stop << ") intf " + << LeaveBefore << '-' << EnterAfter << ", live-through " + << IntvIn << " -> " << IntvOut); assert((IntvIn || IntvOut) && "Use splitSingleBlock for isolated blocks"); @@ -1665,7 +1667,7 @@ SlotIndex Start, Stop; std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB); - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop + DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop << "), uses " << BI.FirstInstr << '-' << BI.LastInstr << ", reg-in " << IntvIn << ", leave before " << LeaveBefore << (BI.LiveOut ? ", stack-out" : ", killed in block")); @@ -1757,7 +1759,7 @@ SlotIndex Start, Stop; std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB); - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop + DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop << "), uses " << BI.FirstInstr << '-' << BI.LastInstr << ", reg-out " << IntvOut << ", enter after " << EnterAfter << (BI.LiveIn ? 
", stack-in" : ", defined in block")); Index: llvm/trunk/lib/CodeGen/StackColoring.cpp =================================================================== --- llvm/trunk/lib/CodeGen/StackColoring.cpp +++ llvm/trunk/lib/CodeGen/StackColoring.cpp @@ -739,7 +739,7 @@ } else { for (auto Slot : slots) { DEBUG(dbgs() << "Found a use of slot #" << Slot); - DEBUG(dbgs() << " at BB#" << MBB->getNumber() << " index "); + DEBUG(dbgs() << " at " << printMBBReference(*MBB) << " index "); DEBUG(Indexes->getInstructionIndex(MI).print(dbgs())); const AllocaInst *Allocation = MFI->getObjectAllocation(Slot); if (Allocation) { Index: llvm/trunk/lib/CodeGen/TailDuplicator.cpp =================================================================== --- llvm/trunk/lib/CodeGen/TailDuplicator.cpp +++ llvm/trunk/lib/CodeGen/TailDuplicator.cpp @@ -111,9 +111,10 @@ } } if (!Found) { - dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI; - dbgs() << " missing input from predecessor BB#" - << PredBB->getNumber() << '\n'; + dbgs() << "Malformed PHI in " << printMBBReference(*MBB) << ": " + << *MI; + dbgs() << " missing input from predecessor " + << printMBBReference(*PredBB) << '\n'; llvm_unreachable(nullptr); } } @@ -121,15 +122,16 @@ for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) { MachineBasicBlock *PHIBB = MI->getOperand(i + 1).getMBB(); if (CheckExtra && !Preds.count(PHIBB)) { - dbgs() << "Warning: malformed PHI in BB#" << MBB->getNumber() << ": " - << *MI; - dbgs() << " extra input from predecessor BB#" << PHIBB->getNumber() - << '\n'; + dbgs() << "Warning: malformed PHI in " << printMBBReference(*MBB) + << ": " << *MI; + dbgs() << " extra input from predecessor " + << printMBBReference(*PHIBB) << '\n'; llvm_unreachable(nullptr); } if (PHIBB->getNumber() < 0) { - dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI; - dbgs() << " non-existing BB#" << PHIBB->getNumber() << '\n'; + dbgs() << "Malformed PHI in " << printMBBReference(*MBB) << ": " + << *MI; + dbgs() << " non-existing " << printMBBReference(*PHIBB) << '\n'; llvm_unreachable(nullptr); } } @@ -783,7 +785,8 @@ MachineBasicBlock *ForcedLayoutPred, SmallVectorImpl &TDBBs, SmallVectorImpl &Copies) { - DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n'); + DEBUG(dbgs() << "\n*** Tail-duplicating " << printMBBReference(*TailBB) + << '\n'); DenseSet UsedByPhi; getRegsUsedByPHIs(*TailBB, &UsedByPhi); Index: llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp @@ -207,7 +207,7 @@ return nullptr; } } - DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n'); return nullptr; } Index: llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -369,7 +369,7 @@ return nullptr; } } - DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n'); return nullptr; } @@ -383,7 +383,7 @@ // Reject any live-in physregs. It's probably NZCV/EFLAGS, and very hard to // get right. 
if (!MBB->livein_empty()) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n"); return false; } @@ -396,7 +396,7 @@ continue; if (++InstrCount > BlockInstrLimit && !Stress) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than " + DEBUG(dbgs() << printMBBReference(*MBB) << " has more than " << BlockInstrLimit << " instructions.\n"); return false; } @@ -458,8 +458,9 @@ return false; // The CFG topology checks out. - DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber() << " -> BB#" - << CmpBB->getNumber() << " -> BB#" << Tail->getNumber() << '\n'); + DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> " + << printMBBReference(*CmpBB) << " -> " + << printMBBReference(*Tail) << '\n'); ++NumConsidered; // Tail is allowed to have many predecessors, but we can't handle PHIs yet. @@ -562,8 +563,9 @@ } void SSACCmpConv::convert(SmallVectorImpl &RemovedBlocks) { - DEBUG(dbgs() << "Merging BB#" << CmpBB->getNumber() << " into BB#" - << Head->getNumber() << ":\n" << *CmpBB); + DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into " + << printMBBReference(*Head) << ":\n" + << *CmpBB); // All CmpBB instructions are moved into Head, and CmpBB is deleted. // Update the CFG first. Index: llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp @@ -12,9 +12,9 @@ // 1. For BBs that are targets of CBZ/CBNZ instructions, we know the value of // the CBZ/CBNZ source register is zero on the taken/not-taken path. For // instance, the copy instruction in the code below can be removed because -// the CBZW jumps to BB#2 when w0 is zero. +// the CBZW jumps to %bb.2 when w0 is zero. // -// BB#1: +// %bb.1: // cbz w0, .LBB0_2 // .LBB0_2: // mov w0, wzr ; <-- redundant @@ -22,11 +22,11 @@ // 2. If the flag setting instruction defines a register other than WZR/XZR, we // can remove a zero copy in some cases. // -// BB#0: +// %bb.0: // subs w0, w1, w2 // str w0, [x1] // b.ne .LBB0_2 -// BB#1: +// %bb.1: // mov w0, wzr ; <-- redundant // str w0, [x2] // .LBB0_2 @@ -35,7 +35,7 @@ // constant (i.e., ADDS[W|X]ri, SUBS[W|X]ri), we can remove a mov immediate // in some cases. 
// -// BB#0: +// %bb.0: // subs xzr, x0, #1 // b.eq .LBB0_1 // .LBB0_1: Index: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp @@ -270,8 +270,8 @@ dbgs() << "Dest: " << printReg(Element.DestReg, TRI) << " Sources: {"; for (auto &SI : Element.Sources) { - dbgs() << printReg(SI.first, TRI) << "(BB#" - << SI.second->getNumber() << "),"; + dbgs() << printReg(SI.first, TRI) << '(' << printMBBReference(*SI.second) + << "),"; } dbgs() << "}\n"; } @@ -658,7 +658,7 @@ continue; } - DEBUG(dbgs() << "Visiting BB#" << MBB->getNumber() << "\n"); + DEBUG(dbgs() << "Visiting " << printMBBReference(*MBB) << "\n"); MBBMRT *NewMBB = new MBBMRT(MBB); MachineRegion *Region = RegionInfo->getRegionFor(MBB); @@ -705,7 +705,7 @@ // If this is live out of the MBB for (auto &UI : MRI->use_operands(Reg)) { if (UI.getParent()->getParent() != MBB) { - DEBUG(dbgs() << "Add LiveOut (MBB BB#" << MBB->getNumber() + DEBUG(dbgs() << "Add LiveOut (MBB " << printMBBReference(*MBB) << "): " << printReg(Reg, TRI) << "\n"); addLiveOut(Reg); } else { @@ -749,7 +749,8 @@ const MachineRegisterInfo *MRI, const TargetRegisterInfo *TRI, PHILinearize &PHIInfo) { - DEBUG(dbgs() << "-Store Live Outs Begin (BB#" << MBB->getNumber() << ")-\n"); + DEBUG(dbgs() << "-Store Live Outs Begin (" << printMBBReference(*MBB) + << ")-\n"); for (auto &II : *MBB) { for (auto &RI : II.defs()) { storeLiveOutReg(MBB, RI.getReg(), RI.getParent(), MRI, TRI, PHIInfo); @@ -773,8 +774,8 @@ for (int i = 0; i < numPreds; ++i) { if (getPHIPred(PHI, i) == MBB) { unsigned PHIReg = getPHISourceReg(PHI, i); - DEBUG(dbgs() << "Add LiveOut (PhiSource BB#" << MBB->getNumber() - << " -> BB#" << (*SI)->getNumber() + DEBUG(dbgs() << "Add LiveOut (PhiSource " << printMBBReference(*MBB) + << " -> " << printMBBReference(*(*SI)) << "): " << printReg(PHIReg, TRI) << "\n"); addLiveOut(PHIReg); } @@ -1480,8 +1481,8 @@ if (SourceMBB) { MIB.addReg(CombinedSourceReg); MIB.addMBB(SourceMBB); - DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#" - << SourceMBB->getNumber()); + DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", " + << printMBBReference(*SourceMBB)); } for (unsigned i = 0; i < NumInputs; ++i) { @@ -1492,8 +1493,8 @@ MachineBasicBlock *SourcePred = getPHIPred(PHI, i); MIB.addReg(SourceReg); MIB.addMBB(SourcePred); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << SourcePred->getNumber()); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*SourcePred)); } DEBUG(dbgs() << ")\n"); } @@ -1524,8 +1525,8 @@ getPHIDestReg(PHI)); MIB.addReg(CombinedSourceReg); MIB.addMBB(LastMerge); - DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#" - << LastMerge->getNumber()); + DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", " + << printMBBReference(*LastMerge)); for (unsigned i = 0; i < NumInputs; ++i) { if (isPHIRegionIndex(PHIRegionIndices, i)) { continue; @@ -1534,8 +1535,8 @@ MachineBasicBlock *SourcePred = getPHIPred(PHI, i); MIB.addReg(SourceReg); MIB.addMBB(SourcePred); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << SourcePred->getNumber()); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*SourcePred)); } DEBUG(dbgs() << ")\n"); } else { @@ -1572,8 +1573,8 @@ getPHIDestReg(PHI)); MIB.addReg(CombinedSourceReg); MIB.addMBB(IfMBB); - DEBUG(dbgs() << printReg(CombinedSourceReg, 
TRI) << ", BB#" - << IfMBB->getNumber()); + DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", " + << printMBBReference(*IfMBB)); unsigned NumInputs = getPHINumInputs(PHI); for (unsigned i = 0; i < NumInputs; ++i) { if (isPHIRegionIndex(PHIRegionIndices, i)) { @@ -1583,8 +1584,8 @@ MachineBasicBlock *SourcePred = getPHIPred(PHI, i); MIB.addReg(SourceReg); MIB.addMBB(SourcePred); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << SourcePred->getNumber()); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*SourcePred)); } DEBUG(dbgs() << ")\n"); PHI.eraseFromParent(); @@ -1749,11 +1750,11 @@ if (MergeBB->succ_begin() == MergeBB->succ_end()) { return; } - DEBUG(dbgs() << "Merge PHI (BB#" << MergeBB->getNumber() + DEBUG(dbgs() << "Merge PHI (" << printMBBReference(*MergeBB) << "): " << printReg(DestRegister, TRI) << " = PHI(" - << printReg(IfSourceRegister, TRI) << ", BB#" - << IfBB->getNumber() << printReg(CodeSourceRegister, TRI) - << ", BB#" << CodeBB->getNumber() << ")\n"); + << printReg(IfSourceRegister, TRI) << ", " + << printMBBReference(*IfBB) << printReg(CodeSourceRegister, TRI) + << ", " << printMBBReference(*CodeBB) << ")\n"); const DebugLoc &DL = MergeBB->findDebugLoc(MergeBB->begin()); MachineInstrBuilder MIB = BuildMI(*MergeBB, MergeBB->instr_begin(), DL, TII->get(TargetOpcode::PHI), DestRegister); @@ -1811,8 +1812,8 @@ for (auto SI : Succs) { std::pair Edge = SI; - DEBUG(dbgs() << "Removing edge: BB#" << Edge.first->getNumber() << " -> BB#" - << Edge.second->getNumber() << "\n"); + DEBUG(dbgs() << "Removing edge: " << printMBBReference(*Edge.first) + << " -> " << printMBBReference(*Edge.second) << "\n"); Edge.first->removeSuccessor(Edge.second); } } @@ -1850,8 +1851,8 @@ if (!CodeBBEnd->isSuccessor(MergeBB)) CodeBBEnd->addSuccessor(MergeBB); - DEBUG(dbgs() << "Moved MBB#" << CodeBBStart->getNumber() << " through MBB#" - << CodeBBEnd->getNumber() << "\n"); + DEBUG(dbgs() << "Moved " << printMBBReference(*CodeBBStart) << " through " + << printMBBReference(*CodeBBEnd) << "\n"); // If we have a single predecessor we can find a reasonable debug location MachineBasicBlock *SinglePred = @@ -2064,7 +2065,7 @@ // is a source block for a definition. 
SmallVector Sources; if (PHIInfo.findSourcesFromMBB(CodeBB, Sources)) { - DEBUG(dbgs() << "Inserting PHI Live Out from BB#" << CodeBB->getNumber() + DEBUG(dbgs() << "Inserting PHI Live Out from " << printMBBReference(*CodeBB) << "\n"); for (auto SI : Sources) { unsigned DestReg; @@ -2172,16 +2173,17 @@ CurrentBackedgeReg = NewBackedgeReg; DEBUG(dbgs() << "Inserting backedge PHI: " << printReg(NewBackedgeReg, TRI) << " = PHI(" - << printReg(CurrentBackedgeReg, TRI) << ", BB#" - << getPHIPred(*PHIDefInstr, 0)->getNumber() << ", " + << printReg(CurrentBackedgeReg, TRI) << ", " + << printMBBReference(*getPHIPred(*PHIDefInstr, 0)) + << ", " << printReg(getPHISourceReg(*PHIDefInstr, 1), TRI) - << ", BB#" << (*SRI).second->getNumber()); + << ", " << printMBBReference(*(*SRI).second)); } } else { MIB.addReg(SourceReg); MIB.addMBB((*SRI).second); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << (*SRI).second->getNumber() << ", "); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*(*SRI).second) << ", "); } } @@ -2189,8 +2191,8 @@ if (CurrentBackedgeReg != 0) { MIB.addReg(CurrentBackedgeReg); MIB.addMBB(Exit); - DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", BB#" - << Exit->getNumber() << ")\n"); + DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", " + << printMBBReference(*Exit) << ")\n"); } else { DEBUG(dbgs() << ")\n"); } @@ -2443,11 +2445,12 @@ << " = PHI("); MIB.addReg(PHISource); MIB.addMBB(Entry); - DEBUG(dbgs() << printReg(PHISource, TRI) << ", BB#" << Entry->getNumber()); + DEBUG(dbgs() << printReg(PHISource, TRI) << ", " + << printMBBReference(*Entry)); MIB.addReg(RegionSourceReg); MIB.addMBB(RegionSourceMBB); - DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", BB#" - << RegionSourceMBB->getNumber() << ")\n"); + DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", " + << printMBBReference(*RegionSourceMBB) << ")\n"); } void AMDGPUMachineCFGStructurizer::splitLoopPHIs(MachineBasicBlock *Entry, @@ -2528,9 +2531,9 @@ MachineBasicBlock *EntrySucc = split(Entry->getFirstNonPHI()); MachineBasicBlock *Exit = LRegion->getExit(); - DEBUG(dbgs() << "Split BB#" << Entry->getNumber() << " to BB#" - << Entry->getNumber() << " -> BB#" << EntrySucc->getNumber() - << "\n"); + DEBUG(dbgs() << "Split " << printMBBReference(*Entry) << " to " + << printMBBReference(*Entry) << " -> " + << printMBBReference(*EntrySucc) << "\n"); LRegion->addMBB(EntrySucc); // Make the backedge go to Entry Succ Index: llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp +++ llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp @@ -63,8 +63,8 @@ unsigned MaxInstNum = std::numeric_limits::max()) { auto BB = Begin->getParent(); - OS << BB->getParent()->getName() << ":BB#" << BB->getNumber() - << ' ' << BB->getName() << ":\n"; + OS << BB->getParent()->getName() << ":" << printMBBReference(*BB) << ' ' + << BB->getName() << ":\n"; auto I = Begin; MaxInstNum = std::max(MaxInstNum, 1u); for (; I != End && MaxInstNum; ++I, --MaxInstNum) { Index: llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp +++ llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp @@ -531,9 +531,8 @@ } DEBUG(dbgs() << "********** MI Scheduling **********\n"); - DEBUG(dbgs() << MF.getName() - << ":BB#" << MBB->getNumber() << " " << MBB->getName() - << "\n 
From: " << *begin() << " To: "; + DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " + << MBB->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'); Index: llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -22,7 +22,7 @@ /// %2 = VECTOR_INST /// %3 = COPY %2 /// BB2: -/// %4 = PHI %1 , , %3 , +/// %4 = PHI %1 , <%bb.0>, %3 , <%bb.1> /// %5 = VECTOR_INST %4 /// /// @@ -37,7 +37,7 @@ /// %2 = VECTOR_INST /// %3 = COPY %2 /// BB2: -/// %4 = PHI %0 , , %3 , +/// %4 = PHI %0 , <%bb.0>, %3 , <%bb.1> /// %5 = VECTOR_INST %4 /// /// Now that the result of the PHI instruction is an SGPR, the register @@ -52,7 +52,7 @@ /// %2 = VECTOR_INST /// %3 = COPY %2 /// BB2: -/// %4 = PHI %0 , , %3 , +/// %4 = PHI %0 , <%bb.0>, %3 , <%bb.1> /// %5 = VECTOR_INST %4 /// /// Now this code contains an illegal copy from a VGPR to an SGPR. @@ -515,8 +515,9 @@ if (MDT.dominates(MI1, MI2)) { if (!intereferes(MI2, MI1)) { - DEBUG(dbgs() << "Erasing from BB#" << MI2->getParent()->getNumber() - << " " << *MI2); + DEBUG(dbgs() << "Erasing from " + << printMBBReference(*MI2->getParent()) << " " + << *MI2); MI2->eraseFromParent(); Defs.erase(I2++); Changed = true; @@ -524,8 +525,9 @@ } } else if (MDT.dominates(MI2, MI1)) { if (!intereferes(MI1, MI2)) { - DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber() - << " " << *MI1); + DEBUG(dbgs() << "Erasing from " + << printMBBReference(*MI1->getParent()) << " " + << *MI1); MI1->eraseFromParent(); Defs.erase(I1++); Changed = true; @@ -541,10 +543,11 @@ MachineBasicBlock::iterator I = MBB->getFirstNonPHI(); if (!intereferes(MI1, I) && !intereferes(MI2, I)) { - DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber() - << " " << *MI1 << "and moving from BB#" - << MI2->getParent()->getNumber() << " to BB#" - << I->getParent()->getNumber() << " " << *MI2); + DEBUG(dbgs() << "Erasing from " + << printMBBReference(*MI1->getParent()) << " " << *MI1 + << "and moving from " + << printMBBReference(*MI2->getParent()) << " to " + << printMBBReference(*I->getParent()) << " " << *MI2); I->getParent()->splice(I, MI2->getParent(), MI2); MI1->eraseFromParent(); Defs.erase(I1++); Index: llvm/trunk/lib/Target/AMDGPU/SIMachineScheduler.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIMachineScheduler.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIMachineScheduler.cpp @@ -2050,9 +2050,9 @@ placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; - dumpSchedule(); - dbgs() << '\n'; - }); + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; + dumpSchedule(); + dbgs() << '\n'; + }); } Index: llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -224,7 +224,8 @@ #ifndef NDEBUG LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() { for (const auto &BII : Blocks) { - dbgs() << "\nBB#" << BII.first->getNumber() << ":\n" + dbgs() << "\n" + << printMBBReference(*BII.first) << ":\n" << " InNeeds = " << 
PrintState(BII.second.InNeeds) << ", Needs = " << PrintState(BII.second.Needs) << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n"; @@ -680,7 +681,7 @@ if (!isEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact) return; - DEBUG(dbgs() << "\nProcessing block BB#" << MBB.getNumber() << ":\n"); + DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB) << ":\n"); unsigned SavedWQMReg = 0; unsigned SavedNonWWMReg = 0; Index: llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp +++ llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -326,7 +326,7 @@ DEBUG({ for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) { const BasicBlockInfo &BBI = BBInfo[J]; - dbgs() << format("%08x BB#%u\t", BBI.Offset, J) + dbgs() << format("%08x %bb.%u\t", BBI.Offset, J) << " kb=" << unsigned(BBI.KnownBits) << " ua=" << unsigned(BBI.Unalign) << " pa=" << unsigned(BBI.PostAlign) @@ -1071,11 +1071,11 @@ const BasicBlockInfo &BBI = BBInfo[Block]; dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm() << " max delta=" << MaxDisp - << format(" insn address=%#x", UserOffset) - << " in BB#" << Block << ": " + << format(" insn address=%#x", UserOffset) << " in " + << printMBBReference(*MI->getParent()) << ": " << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI << format("CPE address=%#x offset=%+d: ", CPEOffset, - int(CPEOffset-UserOffset)); + int(CPEOffset - UserOffset)); }); } @@ -1261,7 +1261,7 @@ // This is the least amount of required padding seen so far. BestGrowth = Growth; WaterIter = IP; - DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber() + DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB) << " Growth=" << Growth << '\n'); if (CloserWater && WaterBB == U.MI->getParent()) @@ -1305,8 +1305,8 @@ unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta; if (isOffsetInRange(UserOffset, CPEOffset, U)) { - DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber() - << format(", expected CPE offset %#x\n", CPEOffset)); + DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB) + << format(", expected CPE offset %#x\n", CPEOffset)); NewMBB = &*++UserMBB->getIterator(); // Add an unconditional branch from UserMBB to fallthrough block. Record // it for branch lengthening; this new branch will not get out of range, @@ -1578,11 +1578,11 @@ unsigned BrOffset = getOffsetOf(MI) + PCAdj; unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset; - DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber() - << " from BB#" << MI->getParent()->getNumber() - << " max delta=" << MaxDisp - << " from " << getOffsetOf(MI) << " to " << DestOffset - << " offset " << int(DestOffset-BrOffset) << "\t" << *MI); + DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB) + << " from " << printMBBReference(*MI->getParent()) + << " max delta=" << MaxDisp << " from " << getOffsetOf(MI) + << " to " << DestOffset << " offset " + << int(DestOffset - BrOffset) << "\t" << *MI); if (BrOffset <= DestOffset) { // Branch before the Dest. @@ -1700,9 +1700,9 @@ } MachineBasicBlock *NextBB = &*++MBB->getIterator(); - DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber() - << " also invert condition and change dest. to BB#" - << NextBB->getNumber() << "\n"); + DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB) + << " also invert condition and change dest. 
to " + << printMBBReference(*NextBB) << "\n"); // Insert a new conditional branch and a new unconditional branch. // Also update the ImmBranch as well as adding a new entry for the new branch. @@ -2212,7 +2212,7 @@ .addReg(IdxReg, getKillRegState(IdxRegKill)) .addJumpTableIndex(JTI, JTOP.getTargetFlags()) .addImm(CPEMI->getOperand(0).getImm()); - DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI); + DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI); unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH; CPEMI->setDesc(TII->get(JTOpc)); Index: llvm/trunk/lib/Target/ARM/ARMConstantPoolValue.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMConstantPoolValue.cpp +++ llvm/trunk/lib/Target/ARM/ARMConstantPoolValue.cpp @@ -292,6 +292,6 @@ } void ARMConstantPoolMBB::print(raw_ostream &O) const { - O << "BB#" << MBB->getNumber(); + O << printMBBReference(*MBB); ARMConstantPoolValue::print(O); } Index: llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp =================================================================== --- llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp +++ llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp @@ -573,10 +573,10 @@ return; } else { // The PHI node looks like: - // %2 = PHI %0, , %1, - // Trace each incoming definition, e.g., (%0, BB#1) and (%1, BB#3) - // The AND operation can be removed if both %0 in BB#1 and %1 in - // BB#3 are defined with with a load matching the MaskN. + // %2 = PHI %0, <%bb.1>, %1, <%bb.3> + // Trace each incoming definition, e.g., (%0, %bb.1) and (%1, %bb.3) + // The AND operation can be removed if both %0 in %bb.1 and %1 in + // %bb.3 are defined with with a load matching the MaskN. DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n'); unsigned PrevReg = -1; for (unsigned i = 0; i < MII->getNumOperands(); ++i) { Index: llvm/trunk/lib/Target/Hexagon/BitTracker.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/BitTracker.cpp +++ llvm/trunk/lib/Target/Hexagon/BitTracker.cpp @@ -767,7 +767,7 @@ void BT::visitPHI(const MachineInstr &PI) { int ThisN = PI.getParent()->getNumber(); if (Trace) - dbgs() << "Visit FI(BB#" << ThisN << "): " << PI; + dbgs() << "Visit FI(" << printMBBReference(*PI.getParent()) << "): " << PI; const MachineOperand &MD = PI.getOperand(0); assert(MD.getSubReg() == 0 && "Unexpected sub-register in definition"); @@ -784,7 +784,8 @@ const MachineBasicBlock *PB = PI.getOperand(i + 1).getMBB(); int PredN = PB->getNumber(); if (Trace) - dbgs() << " edge BB#" << PredN << "->BB#" << ThisN; + dbgs() << " edge " << printMBBReference(*PB) << "->" + << printMBBReference(*PI.getParent()); if (!EdgeExec.count(CFGEdge(PredN, ThisN))) { if (Trace) dbgs() << " not executable\n"; @@ -809,10 +810,8 @@ } void BT::visitNonBranch(const MachineInstr &MI) { - if (Trace) { - int ThisN = MI.getParent()->getNumber(); - dbgs() << "Visit MI(BB#" << ThisN << "): " << MI; - } + if (Trace) + dbgs() << "Visit MI(" << printMBBReference(*MI.getParent()) << "): " << MI; if (MI.isDebugValue()) return; assert(!MI.isBranch() && "Unexpected branch instruction"); @@ -897,7 +896,7 @@ BTs.clear(); const MachineInstr &MI = *It; if (Trace) - dbgs() << "Visit BR(BB#" << ThisN << "): " << MI; + dbgs() << "Visit BR(" << printMBBReference(B) << "): " << MI; assert(MI.isBranch() && "Expecting branch instruction"); InstrExec.insert(&MI); bool Eval = ME.evaluate(MI, Map, BTs, FallsThrough); @@ -913,7 +912,7 @@ if (Trace) { dbgs() << " 
adding targets:"; for (unsigned i = 0, n = BTs.size(); i < n; ++i) - dbgs() << " BB#" << BTs[i]->getNumber(); + dbgs() << " " << printMBBReference(*BTs[i]); if (FallsThrough) dbgs() << "\n falls through\n"; else Index: llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -2977,7 +2977,7 @@ } bool HexagonLoopRescheduling::processLoop(LoopCand &C) { - DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n"); + DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB) << "\n"); std::vector Phis; for (auto &I : *C.LB) { if (!I.isPHI()) Index: llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -617,7 +617,7 @@ void MachineConstPropagator::visitPHI(const MachineInstr &PN) { const MachineBasicBlock *MB = PN.getParent(); unsigned MBN = MB->getNumber(); - DEBUG(dbgs() << "Visiting FI(BB#" << MBN << "): " << PN); + DEBUG(dbgs() << "Visiting FI(" << printMBBReference(*MB) << "): " << PN); const MachineOperand &MD = PN.getOperand(0); Register DefR(MD); @@ -642,8 +642,8 @@ const MachineBasicBlock *PB = PN.getOperand(i+1).getMBB(); unsigned PBN = PB->getNumber(); if (!EdgeExec.count(CFGEdge(PBN, MBN))) { - DEBUG(dbgs() << " edge BB#" << PBN << "->BB#" << MBN - << " not executable\n"); + DEBUG(dbgs() << " edge " << printMBBReference(*PB) << "->" + << printMBBReference(*MB) << " not executable\n"); continue; } const MachineOperand &SO = PN.getOperand(i); @@ -658,9 +658,8 @@ LatticeCell SrcC; bool Eval = MCE.evaluate(UseR, Cells.get(UseR.Reg), SrcC); - DEBUG(dbgs() << " edge from BB#" << PBN << ": " - << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg) - << SrcC << '\n'); + DEBUG(dbgs() << " edge from " << printMBBReference(*PB) << ": " + << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg) << SrcC << '\n'); Changed |= Eval ? DefC.meet(SrcC) : DefC.setBottom(); Cells.update(DefR.Reg, DefC); @@ -672,7 +671,7 @@ } void MachineConstPropagator::visitNonBranch(const MachineInstr &MI) { - DEBUG(dbgs() << "Visiting MI(BB#" << MI.getParent()->getNumber() + DEBUG(dbgs() << "Visiting MI(" << printMBBReference(*MI.getParent()) << "): " << MI); CellMap Outputs; bool Eval = MCE.evaluate(MI, Cells, Outputs); @@ -729,8 +728,8 @@ while (It != End) { const MachineInstr &MI = *It; InstrExec.insert(&MI); - DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "(BB#" - << MBN << "): " << MI); + DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "(" + << printMBBReference(B) << "): " << MI); // Do not evaluate subsequent branches if the evaluation of any of the // previous branches failed. Keep iterating over the branches only // to mark them as executable. 
@@ -772,7 +771,8 @@ for (const MachineBasicBlock *TB : Targets) { unsigned TBN = TB->getNumber(); - DEBUG(dbgs() << " pushing edge BB#" << MBN << " -> BB#" << TBN << "\n"); + DEBUG(dbgs() << " pushing edge " << printMBBReference(B) << " -> " + << printMBBReference(*TB) << "\n"); FlowQ.push(CFGEdge(MBN, TBN)); } } @@ -870,8 +870,10 @@ CFGEdge Edge = FlowQ.front(); FlowQ.pop(); - DEBUG(dbgs() << "Picked edge BB#" << Edge.first << "->BB#" - << Edge.second << '\n'); + DEBUG(dbgs() << "Picked edge " + << printMBBReference(*MF.getBlockNumbered(Edge.first)) << "->" + << printMBBReference(*MF.getBlockNumbered(Edge.second)) + << '\n'); if (Edge.first != EntryNum) if (EdgeExec.count(Edge)) continue; @@ -934,7 +936,8 @@ for (const MachineBasicBlock *SB : B.successors()) { unsigned SN = SB->getNumber(); if (!EdgeExec.count(CFGEdge(BN, SN))) - dbgs() << " BB#" << BN << " -> BB#" << SN << '\n'; + dbgs() << " " << printMBBReference(B) << " -> " + << printMBBReference(*SB) << '\n'; } } }); @@ -3126,7 +3129,7 @@ if (BrI.getOpcode() == Hexagon::J2_jump) return false; - DEBUG(dbgs() << "Rewrite(BB#" << B.getNumber() << "):" << BrI); + DEBUG(dbgs() << "Rewrite(" << printMBBReference(B) << "):" << BrI); bool Rewritten = false; if (NumTargets > 0) { assert(!FallsThru && "This should have been checked before"); Index: llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -27,24 +27,24 @@ // // %40 = L2_loadrub_io %39, 1 // %41 = S2_tstbit_i %40, 0 -// J2_jumpt %41, , %pc -// J2_jump , %pc -// Successors according to CFG: BB#4(62) BB#5(62) +// J2_jumpt %41, <%bb.5>, %pc +// J2_jump <%bb.4>, %pc +// Successors according to CFG: %bb.4(62) %bb.5(62) // -// BB#4: derived from LLVM BB %if.then -// Predecessors according to CFG: BB#3 +// %bb.4: derived from LLVM BB %if.then +// Predecessors according to CFG: %bb.3 // %11 = A2_addp %6, %10 // S2_storerd_io %32, 16, %11 -// Successors according to CFG: BB#5 +// Successors according to CFG: %bb.5 // -// BB#5: derived from LLVM BB %if.end -// Predecessors according to CFG: BB#3 BB#4 -// %12 = PHI %6, , %11, +// %bb.5: derived from LLVM BB %if.end +// Predecessors according to CFG: %bb.3 %bb.4 +// %12 = PHI %6, <%bb.3>, %11, <%bb.4> // %13 = A2_addp %7, %12 // %42 = C2_cmpeqi %9, 10 -// J2_jumpf %42, , %pc -// J2_jump , %pc -// Successors according to CFG: BB#6(4) BB#3(124) +// J2_jumpf %42, <%bb.3>, %pc +// J2_jump <%bb.6>, %pc +// Successors according to CFG: %bb.6(4) %bb.3(124) // // would become: // @@ -55,9 +55,9 @@ // %46 = PS_pselect %41, %6, %11 // %13 = A2_addp %7, %46 // %42 = C2_cmpeqi %9, 10 -// J2_jumpf %42, , %pc -// J2_jump , %pc -// Successors according to CFG: BB#6 BB#3 +// J2_jumpf %42, <%bb.3>, %pc +// J2_jump <%bb.6>, %pc +// Successors according to CFG: %bb.6 %bb.3 #include "Hexagon.h" #include "HexagonInstrInfo.h" @@ -238,7 +238,7 @@ bool HexagonEarlyIfConversion::matchFlowPattern(MachineBasicBlock *B, MachineLoop *L, FlowPattern &FP) { - DEBUG(dbgs() << "Checking flow pattern at BB#" << B->getNumber() << "\n"); + DEBUG(dbgs() << "Checking flow pattern at " << printMBBReference(*B) << "\n"); // Interested only in conditional branches, no .new, no new-value, etc. 
// Check the terminators directly, it's easier than handling all responses Index: llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -654,7 +654,7 @@ return false; TfrCounter++; } - DEBUG(dbgs() << "\nsplitting BB#" << MI.getParent()->getNumber() << ": " + DEBUG(dbgs() << "\nsplitting " << printMBBReference(*MI.getParent()) << ": " << MI); MachineOperand &MD = MI.getOperand(0); // Definition MachineOperand &MP = MI.getOperand(1); // Predicate register Index: llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -443,7 +443,7 @@ DEBUG({ dbgs() << "Blocks needing SF: {"; for (auto &B : SFBlocks) - dbgs() << " BB#" << B->getNumber(); + dbgs() << " " << printMBBReference(*B); dbgs() << " }\n"; }); // No frame needed? @@ -464,12 +464,16 @@ break; } DEBUG({ - dbgs() << "Computed dom block: BB#"; - if (DomB) dbgs() << DomB->getNumber(); - else dbgs() << ""; - dbgs() << ", computed pdom block: BB#"; - if (PDomB) dbgs() << PDomB->getNumber(); - else dbgs() << ""; + dbgs() << "Computed dom block: "; + if (DomB) + dbgs() << printMBBReference(*DomB); + else + dbgs() << ""; + dbgs() << ", computed pdom block: "; + if (PDomB) + dbgs() << printMBBReference(*PDomB); + else + dbgs() << ""; dbgs() << "\n"; }); if (!DomB || !PDomB) @@ -2010,7 +2014,7 @@ auto P = BlockIndexes.insert( std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B))); auto &IndexMap = P.first->second; - DEBUG(dbgs() << "Index map for BB#" << B.getNumber() << "\n" + DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n" << IndexMap << '\n'); for (auto &In : B) { @@ -2129,7 +2133,8 @@ else dbgs() << "\n"; for (auto &R : P.second.Map) - dbgs() << " BB#" << R.first->getNumber() << " { " << R.second << "}\n"; + dbgs() << " " << printMBBReference(*R.first) << " { " << R.second + << "}\n"; } }); @@ -2162,7 +2167,7 @@ auto &FIs = P.second; if (FIs.empty()) continue; - dbgs() << " BB#" << P.first->getNumber() << ": {"; + dbgs() << " " << printMBBReference(*P.first) << ": {"; for (auto I : FIs) { dbgs() << " fi#" << I; if (LoxFIs.count(I)) @@ -2183,7 +2188,7 @@ HexagonBlockRanges::InstrIndexMap &IM = F->second; HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM); HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM); - DEBUG(dbgs() << "BB#" << B.getNumber() << " dead map\n" + DEBUG(dbgs() << printMBBReference(B) << " dead map\n" << HexagonBlockRanges::PrintRangeMap(DM, HRI)); for (auto FI : BlockFIMap[&B]) { Index: llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp @@ -915,7 +915,7 @@ void HexagonGenInsert::collectInBlock(MachineBasicBlock *B, OrderedRegisterList &AVs) { if (isDebug()) - dbgs() << "visiting block BB#" << B->getNumber() << "\n"; + dbgs() << "visiting block " << printMBBReference(*B) << "\n"; // First, check if this block is reachable at all. If not, the bit tracker // will not have any information about registers in it. 
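The hunks above all apply one mechanical rewrite: a debug or dump statement that previously spliced the literal "BB#" together with MBB->getNumber() now streams the block through printMBBReference(), which renders the same block as %bb.<N>. A minimal sketch of the before/after shape (the helper and its surrounding pass are hypothetical, shown only to illustrate the pattern):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void describeBlock(const MachineBasicBlock &MBB) {
      // Old form: hand-rolled prefix plus the raw block number.
      //   dbgs() << "Visiting BB#" << MBB.getNumber() << '\n';
      // New form: printMBBReference() emits the canonical "%bb.N" reference.
      dbgs() << "Visiting " << printMBBReference(MBB) << '\n';
    }

The raw number is still used where a plain integer is genuinely wanted, e.g. inside printf-style format() strings, which simply switch their literal prefix from "BB#" to "%bb.".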
Index: llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -1011,7 +1011,7 @@ bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L, bool IsInnerHWLoop) const { const std::vector &Blocks = L->getBlocks(); - DEBUG(dbgs() << "\nhw_loop head, BB#" << Blocks[0]->getNumber();); + DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0])); for (unsigned i = 0, e = Blocks.size(); i != e; ++i) { MachineBasicBlock *MBB = Blocks[i]; for (MachineBasicBlock::iterator @@ -1367,7 +1367,7 @@ LoopFeederMap &LoopFeederPhi) const { if (LoopFeederPhi.find(MO->getReg()) == LoopFeederPhi.end()) { const std::vector &Blocks = L->getBlocks(); - DEBUG(dbgs() << "\nhw_loop head, BB#" << Blocks[0]->getNumber();); + DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0])); // Ignore all BBs that form Loop. for (unsigned i = 0, e = Blocks.size(); i != e; ++i) { MachineBasicBlock *MBB = Blocks[i]; Index: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -463,7 +463,7 @@ Cond.push_back(LastInst->getOperand(1)); return false; } - DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber() + DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB) << " with one jump\n";); // Otherwise, don't know what this is. return true; @@ -511,7 +511,7 @@ FBB = LastInst->getOperand(0).getMBB(); return false; } - DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber() + DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB) << " with two jumps";); // Otherwise, can't handle this. return true; @@ -521,7 +521,7 @@ int *BytesRemoved) const { assert(!BytesRemoved && "code size not handled"); - DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber()); + DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB)); MachineBasicBlock::iterator I = MBB.end(); unsigned Count = 0; while (I != MBB.begin()) { @@ -593,7 +593,7 @@ // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset) // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset) unsigned Flags1 = getUndefRegState(Cond[1].isUndef()); - DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber();); + DEBUG(dbgs() << "\nInserting NVJump for " << printMBBReference(MBB);); if (Cond[2].isReg()) { unsigned Flags2 = getUndefRegState(Cond[2].isUndef()); BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1). @@ -829,9 +829,8 @@ #ifndef NDEBUG // Show the invalid registers to ease debugging. 
- dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber() - << ": " << printReg(DestReg, &HRI) - << " = " << printReg(SrcReg, &HRI) << '\n'; + dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": " + << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n'; #endif llvm_unreachable("Unimplemented"); } @@ -4032,8 +4031,9 @@ bool HexagonInstrInfo::invertAndChangeJumpTarget( MachineInstr &MI, MachineBasicBlock *NewTarget) const { - DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#" - << NewTarget->getNumber(); MI.dump();); + DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to " + << printMBBReference(*NewTarget); + MI.dump();); assert(MI.isBranch()); unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode()); int TargetPos = MI.getNumOperands() - 1; Index: llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp @@ -186,12 +186,10 @@ /// after setting up the current scheduling region. [RegionBegin, RegionEnd) /// only includes instructions that have DAG nodes, not scheduling boundaries. void VLIWMachineScheduler::schedule() { - DEBUG(dbgs() - << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber() - << " " << BB->getName() - << " in_func " << BB->getParent()->getFunction()->getName() - << " at loop depth " << MLI->getLoopDepth(BB) - << " \n"); + DEBUG(dbgs() << "********** MI Converging Scheduling VLIW " + << printMBBReference(*BB) << " " << BB->getName() << " in_func " + << BB->getParent()->getFunction()->getName() << " at loop depth " + << MLI->getLoopDepth(BB) << " \n"); buildDAGWithRegPressure(); @@ -237,8 +235,8 @@ placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; dumpSchedule(); dbgs() << '\n'; }); Index: llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -461,7 +461,7 @@ DEBUG(dbgs() << "[InstrNode]: " << Print>(UseIA, *DFG) << "\n"); MachineInstr *UseMI = UseIA.Addr->getCode(); - DEBUG(dbgs() << "[MI getParent()->getNumber() + DEBUG(dbgs() << "[MI <" << printMBBReference(*UseMI->getParent()) << ">]: " << *UseMI << "\n"); const MCInstrDesc &UseMID = UseMI->getDesc(); assert(HII->getAddrMode(*UseMI) == HexagonII::BaseImmOffset); @@ -570,7 +570,7 @@ NodeAddr OwnerN = UseN.Addr->getOwner(*DFG); MachineInstr *UseMI = OwnerN.Addr->getCode(); - DEBUG(dbgs() << "\t\t[MI getParent()->getNumber() + DEBUG(dbgs() << "\t\t[MI <" << printMBBReference(*UseMI->getParent()) << ">]: " << *UseMI << "\n"); int UseMOnum = -1; Index: llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp @@ -20,19 +20,18 @@ // ... // %16 = NOT_p %15 // ... -// JMP_c %16, , %pc +// JMP_c %16, <%bb.1>, %pc // // Into // %15 = CMPGTrr %6, %2; // ... 
-// JMP_cNot %15, , %pc; +// JMP_cNot %15, <%bb.1>, %pc; // // Note: The peephole pass makes the instrucstions like // %170 = SXTW %166 or %16 = NOT_p %15 // redundant and relies on some form of dead removal instructions, like // DCE or DIE to actually eliminate them. - //===----------------------------------------------------------------------===// #include "Hexagon.h" Index: llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp @@ -536,7 +536,7 @@ Rs.insert(CmpR2); DEBUG({ - dbgs() << "For loop at BB#" << HB->getNumber() << " ind regs: "; + dbgs() << "For loop at " << printMBBReference(*HB) << " ind regs: "; dump_partition(dbgs(), Rs, *TRI); dbgs() << '\n'; }); Index: llvm/trunk/lib/Target/Hexagon/RDFGraph.h =================================================================== --- llvm/trunk/lib/Target/Hexagon/RDFGraph.h +++ llvm/trunk/lib/Target/Hexagon/RDFGraph.h @@ -111,7 +111,7 @@ // // DFG dump:[ // f1: Function foo -// b2: === BB#0 === preds(0), succs(0): +// b2: === %bb.0 === preds(0), succs(0): // p3: phi [d4(,d12,u9):] // p5: phi [d6(,,u10):] // s7: add [d8(,,u13):, u9(d4):, u10(d6):] Index: llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp +++ llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp @@ -247,7 +247,7 @@ if (T != MI.operands_end()) { OS << ' '; if (T->isMBB()) - OS << "BB#" << T->getMBB()->getNumber(); + OS << printMBBReference(*T->getMBB()); else if (T->isGlobal()) OS << T->getGlobal()->getName(); else if (T->isSymbol()) @@ -284,13 +284,13 @@ auto PrintBBs = [&OS] (std::vector Ns) -> void { unsigned N = Ns.size(); for (int I : Ns) { - OS << "BB#" << I; + OS << "%bb." 
<< I; if (--N) OS << ", "; } }; - OS << Print(P.Obj.Id, P.G) << ": --- BB#" << BB->getNumber() + OS << Print(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB) << " --- preds(" << NP << "): "; for (MachineBasicBlock *B : BB->predecessors()) Ns.push_back(B->getNumber()); @@ -1123,8 +1123,8 @@ if (!Defined.insert(RR.Reg).second) { MachineInstr *MI = NodeAddr(IA).Addr->getCode(); dbgs() << "Multiple definitions of register: " - << Print(RR, *this) << " in\n " << *MI - << "in BB#" << MI->getParent()->getNumber() << '\n'; + << Print(RR, *this) << " in\n " << *MI << "in " + << printMBBReference(*MI->getParent()) << '\n'; llvm_unreachable(nullptr); } #endif Index: llvm/trunk/lib/Target/Hexagon/RDFLiveness.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/RDFLiveness.cpp +++ llvm/trunk/lib/Target/Hexagon/RDFLiveness.cpp @@ -814,7 +814,7 @@ for (auto I = B.livein_begin(), E = B.livein_end(); I != E; ++I) LV.push_back(RegisterRef(I->PhysReg, I->LaneMask)); std::sort(LV.begin(), LV.end()); - dbgs() << "BB#" << B.getNumber() << "\t rec = {"; + dbgs() << printMBBReference(B) << "\t rec = {"; for (auto I : LV) dbgs() << ' ' << Print(I, DFG); dbgs() << " }\n"; @@ -963,7 +963,7 @@ } if (Trace) { - dbgs() << "\n-- BB#" << B->getNumber() << ": " << __func__ + dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__ << " after recursion into: {"; for (auto I : *N) dbgs() << ' ' << I->getBlock()->getNumber(); Index: llvm/trunk/lib/Target/MSP430/MSP430BranchSelector.cpp =================================================================== --- llvm/trunk/lib/Target/MSP430/MSP430BranchSelector.cpp +++ llvm/trunk/lib/Target/MSP430/MSP430BranchSelector.cpp @@ -138,15 +138,15 @@ continue; } - DEBUG(dbgs() << " Found a branch that needs expanding, BB#" - << DestBB->getNumber() << ", Distance " << BranchDistance - << "\n"); + DEBUG(dbgs() << " Found a branch that needs expanding, " + << printMBBReference(*DestBB) << ", Distance " + << BranchDistance << "\n"); // If JCC is not the last instruction we need to split the MBB. if (MI->getOpcode() == MSP430::JCC && std::next(MI) != EE) { - DEBUG(dbgs() << " Found a basic block that needs to be split, BB#" - << MBB->getNumber() << "\n"); + DEBUG(dbgs() << " Found a basic block that needs to be split, " + << printMBBReference(*MBB) << "\n"); // Create a new basic block. 
MachineBasicBlock *NewBB = Index: llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp =================================================================== --- llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp +++ llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -430,7 +430,7 @@ LLVM_DUMP_METHOD void MipsConstantIslands::dumpBBs() { for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) { const BasicBlockInfo &BBI = BBInfo[J]; - dbgs() << format("%08x BB#%u\t", BBI.Offset, J) + dbgs() << format("%08x %bb.%u\t", BBI.Offset, J) << format(" size=%#x\n", BBInfo[J].Size); } } @@ -991,11 +991,11 @@ const BasicBlockInfo &BBI = BBInfo[Block]; dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm() << " max delta=" << MaxDisp - << format(" insn address=%#x", UserOffset) - << " in BB#" << Block << ": " + << format(" insn address=%#x", UserOffset) << " in " + << printMBBReference(*MI->getParent()) << ": " << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI << format("CPE address=%#x offset=%+d: ", CPEOffset, - int(CPEOffset-UserOffset)); + int(CPEOffset - UserOffset)); }); } @@ -1197,7 +1197,7 @@ // This is the least amount of required padding seen so far. BestGrowth = Growth; WaterIter = IP; - DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber() + DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB) << " Growth=" << Growth << '\n'); // Keep looking unless it is perfect. @@ -1236,8 +1236,8 @@ unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta; if (isOffsetInRange(UserOffset, CPEOffset, U)) { - DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber() - << format(", expected CPE offset %#x\n", CPEOffset)); + DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB) + << format(", expected CPE offset %#x\n", CPEOffset)); NewMBB = &*++UserMBB->getIterator(); // Add an unconditional branch from UserMBB to fallthrough block. Record // it for branch lengthening; this new branch will not get out of range, @@ -1470,11 +1470,11 @@ unsigned BrOffset = getOffsetOf(MI) + PCAdj; unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset; - DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber() - << " from BB#" << MI->getParent()->getNumber() - << " max delta=" << MaxDisp - << " from " << getOffsetOf(MI) << " to " << DestOffset - << " offset " << int(DestOffset-BrOffset) << "\t" << *MI); + DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB) + << " from " << printMBBReference(*MI->getParent()) + << " max delta=" << MaxDisp << " from " << getOffsetOf(MI) + << " to " << DestOffset << " offset " + << int(DestOffset - BrOffset) << "\t" << *MI); if (BrOffset <= DestOffset) { // Branch before the Dest. @@ -1615,9 +1615,9 @@ } MachineBasicBlock *NextBB = &*++MBB->getIterator(); - DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber() - << " also invert condition and change dest. to BB#" - << NextBB->getNumber() << "\n"); + DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB) + << " also invert condition and change dest. to " + << printMBBReference(*NextBB) << "\n"); // Insert a new conditional branch and a new unconditional branch. // Also update the ImmBranch as well as adding a new entry for the new branch. 
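Several passes track blocks only by number (the constant-propagation edge worklist above, the per-block ISEL lists in the PowerPC hunks below); there the hunks first recover the MachineBasicBlock from its owning MachineFunction and then print it. A small sketch of that shape, with hypothetical names:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void describeEdge(const MachineFunction &MF, unsigned FromNum,
                             unsigned ToNum) {
      // The worklist stores bare block numbers; map each one back to its
      // MachineBasicBlock so printMBBReference() can emit "%bb.N".
      dbgs() << "Picked edge "
             << printMBBReference(*MF.getBlockNumbered(FromNum)) << " -> "
             << printMBBReference(*MF.getBlockNumbered(ToNum)) << '\n';
    }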
Index: llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp +++ llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp @@ -59,45 +59,45 @@ /// /// expands to the following machine code: /// -/// BB#0: derived from LLVM BB %entry +/// %bb.0: derived from LLVM BB %entry /// Live Ins: %f1 %f3 %x6 /// /// %0 = COPY %f1; F8RC:%0 /// %5 = CMPLWI %4, 0; CRRC:%5 GPRC:%4 /// %8 = LXSDX %zero8, %7, %rm; /// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7 -/// BCC 76, %5, ; CRRC:%5 -/// Successors according to CFG: BB#1(?%) BB#2(?%) +/// BCC 76, %5, <%bb.2>; CRRC:%5 +/// Successors according to CFG: %bb.1(?%) %bb.2(?%) /// -/// BB#1: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 -/// Successors according to CFG: BB#2(?%) -/// -/// BB#2: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 BB#1 -/// %9 = PHI %8, , %0, ; +/// %bb.1: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 +/// Successors according to CFG: %bb.2(?%) +/// +/// %bb.2: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 %bb.1 +/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>; /// F8RC:%9,%8,%0 /// -/// BCC 76, %5, ; CRRC:%5 -/// Successors according to CFG: BB#3(?%) BB#4(?%) +/// BCC 76, %5, <%bb.4>; CRRC:%5 +/// Successors according to CFG: %bb.3(?%) %bb.4(?%) /// -/// BB#3: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#2 -/// Successors according to CFG: BB#4(?%) -/// -/// BB#4: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#2 BB#3 -/// %13 = PHI %12, , %2, ; +/// %bb.3: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.2 +/// Successors according to CFG: %bb.4(?%) +/// +/// %bb.4: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.2 %bb.3 +/// %13 = PHI %12, <%bb.3>, %2, <%bb.2>; /// F8RC:%13,%12,%2 /// /// BLR8 %lr8, %rm, %f1 /// /// When this pattern is detected, branch coalescing will try to collapse -/// it by moving code in BB#2 to BB#0 and/or BB#4 and removing BB#3. +/// it by moving code in %bb.2 to %bb.0 and/or %bb.4 and removing %bb.3. 
/// /// If all conditions are meet, IR should collapse to: /// -/// BB#0: derived from LLVM BB %entry +/// %bb.0: derived from LLVM BB %entry /// Live Ins: %f1 %f3 %x6 /// /// %0 = COPY %f1; F8RC:%0 @@ -105,19 +105,19 @@ /// %8 = LXSDX %zero8, %7, %rm; /// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7 /// -/// BCC 76, %5, ; CRRC:%5 -/// Successors according to CFG: BB#1(0x2aaaaaaa / 0x80000000 = 33.33%) -/// BB#4(0x55555554 / 0x80000000 = 66.67%) -/// -/// BB#1: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 -/// Successors according to CFG: BB#4(0x40000000 / 0x80000000 = 50.00%) -/// -/// BB#4: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 BB#1 -/// %9 = PHI %8, , %0, ; +/// BCC 76, %5, <%bb.4>; CRRC:%5 +/// Successors according to CFG: %bb.1(0x2aaaaaaa / 0x80000000 = 33.33%) +/// %bb.4(0x55555554 / 0x80000000 = 66.67%) +/// +/// %bb.1: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 +/// Successors according to CFG: %bb.4(0x40000000 / 0x80000000 = 50.00%) +/// +/// %bb.4: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 %bb.1 +/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>; /// F8RC:%9,%8,%0 -/// %13 = PHI %12, , %2, ; +/// %13 = PHI %12, <%bb.1>, %2, <%bb.0>; /// F8RC:%13,%12,%2 /// /// BLR8 %lr8, %rm, %f1 Index: llvm/trunk/lib/Target/PowerPC/PPCCTRLoops.cpp =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCCTRLoops.cpp +++ llvm/trunk/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -690,12 +690,11 @@ } if (I != BI && clobbersCTR(*I)) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" << - MBB->getFullName() << ") instruction " << *I << - " clobbers CTR, invalidating " << "BB#" << - BI->getParent()->getNumber() << " (" << - BI->getParent()->getFullName() << ") instruction " << - *BI << "\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " (" << MBB->getFullName() + << ") instruction " << *I << " clobbers CTR, invalidating " + << printMBBReference(*BI->getParent()) << " (" + << BI->getParent()->getFullName() << ") instruction " << *BI + << "\n"); return false; } @@ -709,10 +708,10 @@ if (CheckPreds) { queue_preds: if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) { - DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" << - BI->getParent()->getNumber() << " (" << - BI->getParent()->getFullName() << ") instruction " << - *BI << "\n"); + DEBUG(dbgs() << "Unable to find a MTCTR instruction for " + << printMBBReference(*BI->getParent()) << " (" + << BI->getParent()->getFullName() << ") instruction " << *BI + << "\n"); return false; } Index: llvm/trunk/lib/Target/PowerPC/PPCExpandISEL.cpp =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCExpandISEL.cpp +++ llvm/trunk/lib/Target/PowerPC/PPCExpandISEL.cpp @@ -171,7 +171,7 @@ #ifndef NDEBUG void PPCExpandISEL::DumpISELInstructions() const { for (const auto &I : ISELInstructions) { - DEBUG(dbgs() << "BB#" << I.first << ":\n"); + DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(I.first)) << ":\n"); for (const auto &VI : I.second) DEBUG(dbgs() << " "; VI->print(dbgs())); } @@ -191,7 +191,11 @@ void PPCExpandISEL::expandAndMergeISELs() { for (auto &BlockList : ISELInstructions) { - DEBUG(dbgs() << "Expanding ISEL instructions in BB#" << BlockList.first + + DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(BlockList.first)) + << ":\n"); + DEBUG(dbgs() << "Expanding ISEL instructions in " + << 
printMBBReference(*MF->getBlockNumbered(BlockList.first)) << "\n"); BlockISELList &CurrentISELList = BlockList.second; Index: llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp +++ llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -686,7 +686,7 @@ DEBUG(LiMI->dump()); // There could be repeated registers in the PHI, e.g: %1 = - // PHI %6, , %8, , %8, ; So if we've + // PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've // already replaced the def instruction, skip. if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8) continue; @@ -1209,8 +1209,9 @@ DEBUG(BI1->dump()); DEBUG(BI2->dump()); if (IsPartiallyRedundant) { - DEBUG(dbgs() << "The following compare is moved into BB#" << - MBBtoMoveCmp->getNumber() << " to handle partial redundancy.\n"); + DEBUG(dbgs() << "The following compare is moved into " + << printMBBReference(*MBBtoMoveCmp) + << " to handle partial redundancy.\n"); DEBUG(CMPI2->dump()); } Index: llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp +++ llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp @@ -966,7 +966,7 @@ dbgs() << format("%6d", ID); dbgs() << format("%6d", EC->getLeaderValue(ID)); - dbgs() << format(" BB#%3d", MI->getParent()->getNumber()); + dbgs() << format(" %bb.%3d", MI->getParent()->getNumber()); dbgs() << format(" %14s ", TII->getName(MI->getOpcode()).str().c_str()); if (SwapVector[EntryIdx].IsLoad) Index: llvm/trunk/lib/Target/PowerPC/README.txt =================================================================== --- llvm/trunk/lib/Target/PowerPC/README.txt +++ llvm/trunk/lib/Target/PowerPC/README.txt @@ -256,7 +256,7 @@ cmpwi cr0, r3, 0 li r2, 0 blt cr0, LBB1_2 -; BB#1: ; %entry +; %bb.1: ; %entry mr r2, r3 LBB1_2: ; %entry mr r3, r2 Index: llvm/trunk/lib/Target/PowerPC/README_ALTIVEC.txt =================================================================== --- llvm/trunk/lib/Target/PowerPC/README_ALTIVEC.txt +++ llvm/trunk/lib/Target/PowerPC/README_ALTIVEC.txt @@ -233,7 +233,7 @@ Produces the following code with -mtriple=powerpc64-unknown-linux-gnu: -# BB#0: # %entry +# %bb.0: # %entry addis 3, 2, .LCPI0_0@toc@ha addis 4, 2, .LCPI0_1@toc@ha addi 3, 3, .LCPI0_0@toc@l Index: llvm/trunk/lib/Target/README.txt =================================================================== --- llvm/trunk/lib/Target/README.txt +++ llvm/trunk/lib/Target/README.txt @@ -1778,7 +1778,7 @@ instcombine should catch it earlier: _foo: ## @foo -## BB#0: ## %entry +## %bb.0: ## %entry movl %edi, %eax sarl $4, %eax ret @@ -2234,13 +2234,13 @@ which we compile to: foo: # @foo -# BB#0: # %entry +# %bb.0: # %entry pushq %rbp movq %rsp, %rbp testl %esi, %esi movq %rdi, %rax je .LBB0_2 -# BB#1: # %if.then +# %bb.1: # %if.then movl $5, %edi callq *%rax popq %rbp Index: llvm/trunk/lib/Target/SystemZ/SystemZMachineScheduler.cpp =================================================================== --- llvm/trunk/lib/Target/SystemZ/SystemZMachineScheduler.cpp +++ llvm/trunk/lib/Target/SystemZ/SystemZMachineScheduler.cpp @@ -74,7 +74,7 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) { assert ((SchedStates.find(NextMBB) == SchedStates.end()) && "Entering MBB twice?"); - DEBUG (dbgs() << "+++ Entering MBB#" << NextMBB->getNumber()); + DEBUG(dbgs() << "+++ Entering " << printMBBReference(*NextMBB)); MBB = NextMBB; 
/// Create a HazardRec for MBB, save it in SchedStates and set HazardRec to @@ -93,8 +93,8 @@ SchedStates.find(SinglePredMBB) == SchedStates.end()) return; - DEBUG (dbgs() << "+++ Continued scheduling from MBB#" - << SinglePredMBB->getNumber() << "\n";); + DEBUG(dbgs() << "+++ Continued scheduling from " + << printMBBReference(*SinglePredMBB) << "\n";); HazardRec->copyState(SchedStates[SinglePredMBB]); @@ -113,7 +113,7 @@ } void SystemZPostRASchedStrategy::leaveMBB() { - DEBUG (dbgs() << "+++ Leaving MBB#" << MBB->getNumber() << "\n";); + DEBUG(dbgs() << "+++ Leaving " << printMBBReference(*MBB) << "\n";); // Advance to first terminator. The successor block will handle terminators // dependent on CFG layout (T/NT branch etc). Index: llvm/trunk/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp =================================================================== --- llvm/trunk/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp +++ llvm/trunk/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp @@ -205,8 +205,7 @@ continue; unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1; - DEBUG(dbgs() << "MBB#" << MBB->getNumber() << " has index " << Index - << "\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index << "\n"); Pair.first->second = Index; for (auto Pred : MBB->predecessors()) Index: llvm/trunk/lib/Target/X86/README.txt =================================================================== --- llvm/trunk/lib/Target/X86/README.txt +++ llvm/trunk/lib/Target/X86/README.txt @@ -987,11 +987,11 @@ to: foo: # @foo -# BB#0: # %entry +# %bb.0: # %entry movl 4(%esp), %ecx cmpb $0, 16(%esp) je .LBB0_2 -# BB#1: # %bb +# %bb.1: # %bb movl 8(%esp), %eax addl %ecx, %eax ret @@ -1073,7 +1073,7 @@ This compiles into: _abort_gzip: ## @abort_gzip -## BB#0: ## %entry +## %bb.0: ## %entry subl $12, %esp movb _in_exit.4870.b, %al cmpb $1, %al @@ -1396,7 +1396,7 @@ } bar: # @bar -# BB#0: +# %bb.0: movb (%rdi), %al andb $1, %al movzbl %al, %eax @@ -1633,7 +1633,7 @@ code we generate: _foo: ## @foo -## BB#0: ## %entry +## %bb.0: ## %entry movb (%rsi), %al movb (%rdi), %cl cmpb %al, %cl @@ -1646,12 +1646,12 @@ movb 1(%rdi), %cl cmpb %al, %cl jne LBB0_1 -## BB#3: ## %if.end38 +## %bb.3: ## %if.end38 movb 2(%rsi), %al movb 2(%rdi), %cl cmpb %al, %cl jne LBB0_1 -## BB#4: ## %if.end60 +## %bb.4: ## %if.end60 movb 3(%rdi), %al cmpb 3(%rsi), %al LBB0_5: ## %if.end60 Index: llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp +++ llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp @@ -188,16 +188,17 @@ /// necessary (e.g. due to register coalescing with a "truncate" copy). 
/// So, it handles pattern like this: /// -/// BB#2: derived from LLVM BB %if.then +/// %bb.2: derived from LLVM BB %if.then /// Live Ins: %rdi -/// Predecessors according to CFG: BB#0 -/// %ax = MOV16rm %rdi, 1, %noreg, 0, %noreg, %eax; mem:LD2[%p] +/// Predecessors according to CFG: %bb.0 +/// %ax = MOV16rm %rdi, 1, %noreg, 0, %noreg, %eax; +/// mem:LD2[%p] /// No %eax -/// Successors according to CFG: BB#3(?%) +/// Successors according to CFG: %bb.3(?%) /// -/// BB#3: derived from LLVM BB %if.end +/// %bb.3: derived from LLVM BB %if.end /// Live Ins: %eax Only %ax is actually live -/// Predecessors according to CFG: BB#2 BB#1 +/// Predecessors according to CFG: %bb.2 %bb.1 /// %ax = KILL %ax, %eax /// RET 0, %ax static bool isLive(const MachineInstr &MI, Index: llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp +++ llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp @@ -499,7 +499,7 @@ /// setupBlockStack - Use the live bundles to set up our model of the stack /// to match predecessors' live out stack. void FPS::setupBlockStack() { - DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber() + DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB) << " derived from " << MBB->getName() << ".\n"); StackTop = 0; // Get the live-in bundle for MBB. @@ -538,7 +538,7 @@ if (MBB->succ_empty()) return; - DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber() + DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB) << " derived from " << MBB->getName() << ".\n"); // Get MBB's live-out bundle. Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -58,19 +58,19 @@ ; CHECK: body: ; ; ABI/constant lowering and IR-level entry basic block. -; CHECK: {{bb.[0-9]+}}.entry: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: ; ; Make sure we have one successor and only one. -; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+.bb2]](0x80000000) +; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+]](0x80000000) ; ; Check that we emit the correct branch. ; CHECK: G_BR %[[BB2]] ; ; Check that end contains the return instruction. -; CHECK: [[END:bb.[0-9]+.end]]: +; CHECK: [[END:bb.[0-9]+]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR ; -; CHECK: {{bb.[0-9]+}}.bb2: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: successors: %[[END]](0x80000000) ; CHECK: G_BR %[[END]] define void @uncondbr() { @@ -84,11 +84,11 @@ ; CHECK-LABEL: name: uncondbr_fallthrough ; CHECK: body: -; CHECK: {{bb.[0-9]+}}.entry: -; CHECK-NEXT: successors: %[[END:bb.[0-9]+.end]](0x80000000) +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[END:bb.[0-9]+]](0x80000000) ; We don't emit a branch here, as we can fallthrough to the successor. ; CHECK-NOT: G_BR -; CHECK: [[END]]: +; CHECK: [[END]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR define void @uncondbr_fallthrough() { entry: @@ -102,10 +102,10 @@ ; CHECK: body: ; ; ABI/constant lowering and IR-level entry basic block. 
-; CHECK: {{bb.[0-9]+}} (%ir-block.{{[0-9]+}}): +; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}): ; Make sure we have two successors -; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+.true]](0x40000000), -; CHECK: %[[FALSE:bb.[0-9]+.false]](0x40000000) +; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+]](0x40000000), +; CHECK: %[[FALSE:bb.[0-9]+]](0x40000000) ; ; CHECK: [[ADDR:%.*]]:_(p0) = COPY %x0 ; @@ -115,9 +115,9 @@ ; CHECK: G_BR %[[FALSE]] ; ; Check that each successor contains the return instruction. -; CHECK: [[TRUE]]: +; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR -; CHECK: [[FALSE]]: +; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR define void @condbr(i1* %tstaddr) { %tst = load i1, i1* %tstaddr @@ -133,8 +133,8 @@ ; CHECK-LABEL: name: switch ; CHECK: body: ; -; CHECK: {{bb.[0-9]+.entry}}: -; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+.case100]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+.entry]](0x40000000) +; CHECK: bb.{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+]](0x40000000) ; CHECK: %0:_(s32) = COPY %w0 ; CHECK: %[[reg100:[0-9]+]]:_(s32) = G_CONSTANT i32 100 ; CHECK: %[[reg200:[0-9]+]]:_(s32) = G_CONSTANT i32 200 @@ -145,31 +145,31 @@ ; CHECK: G_BRCOND %[[regicmp100]](s1), %[[BB_CASE100]] ; CHECK: G_BR %[[BB_NOTCASE100_CHECKNEXT]] ; -; CHECK: [[BB_NOTCASE100_CHECKNEXT]]: -; CHECK-NEXT: successors: %[[BB_CASE200:bb.[0-9]+.case200]](0x40000000), %[[BB_NOTCASE200_CHECKNEXT:bb.[0-9]+.entry]](0x40000000) +; CHECK: [[BB_NOTCASE100_CHECKNEXT]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_CASE200:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE200_CHECKNEXT:bb.[0-9]+]](0x40000000) ; CHECK: %[[regicmp200:[0-9]+]]:_(s1) = G_ICMP intpred(eq), %[[reg200]](s32), %0 ; CHECK: G_BRCOND %[[regicmp200]](s1), %[[BB_CASE200]] ; CHECK: G_BR %[[BB_NOTCASE200_CHECKNEXT]] ; -; CHECK: [[BB_NOTCASE200_CHECKNEXT]]: -; CHECK-NEXT: successors: %[[BB_DEFAULT:bb.[0-9]+.default]](0x80000000) +; CHECK: [[BB_NOTCASE200_CHECKNEXT]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_DEFAULT:bb.[0-9]+]](0x80000000) ; CHECK: G_BR %[[BB_DEFAULT]] ; -; CHECK: [[BB_DEFAULT]]: -; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000) +; CHECK: [[BB_DEFAULT]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+]](0x80000000) ; CHECK: %[[regretdefault:[0-9]+]]:_(s32) = G_ADD %0, %[[reg0]] ; CHECK: G_BR %[[BB_RET]] ; -; CHECK: [[BB_CASE100]]: -; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000) +; CHECK: [[BB_CASE100]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+]](0x80000000) ; CHECK: %[[regretc100:[0-9]+]]:_(s32) = G_ADD %0, %[[reg1]] ; CHECK: G_BR %[[BB_RET]] ; -; CHECK: [[BB_CASE200]]: +; CHECK: [[BB_CASE200]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: successors: %[[BB_RET]](0x80000000) ; CHECK: %[[regretc200:[0-9]+]]:_(s32) = G_ADD %0, %[[reg2]] ; -; CHECK: [[BB_RET]]: +; CHECK: [[BB_RET]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: %[[regret:[0-9]+]]:_(s32) = G_PHI %[[regretdefault]](s32), %[[BB_DEFAULT]], %[[regretc100]](s32), %[[BB_CASE100]] ; CHECK: %w0 = COPY %[[regret]](s32) ; CHECK: RET_ReallyLR implicit %w0 @@ -202,16 +202,16 @@ ; %entry block is no longer a predecessor for the phi instruction. We need to ; use the correct lowered MachineBasicBlock instead. 
; CHECK-LABEL: name: test_cfg_remap -; CHECK: {{bb.[0-9]+.entry}}: -; CHECK-NEXT: successors: %{{bb.[0-9]+.next}}(0x40000000), %[[NOTCASE1_BLOCK:bb.[0-9]+.entry]](0x40000000) -; CHECK: [[NOTCASE1_BLOCK]]: -; CHECK-NEXT: successors: %{{bb.[0-9]+.other}}(0x40000000), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]](0x40000000) -; CHECK: [[NOTCASE57_BLOCK]]: -; CHECK-NEXT: successors: %[[PHI_BLOCK:bb.[0-9]+.phi.block]](0x80000000) +; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %{{bb.[0-9]+}}(0x40000000), %[[NOTCASE1_BLOCK:bb.[0-9]+]](0x40000000) +; CHECK: [[NOTCASE1_BLOCK]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %{{bb.[0-9]+}}(0x40000000), %[[NOTCASE57_BLOCK:bb.[0-9]+]](0x40000000) +; CHECK: [[NOTCASE57_BLOCK]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[PHI_BLOCK:bb.[0-9]+]](0x80000000) ; CHECK: G_BR %[[PHI_BLOCK]] ; -; CHECK: [[PHI_BLOCK]]: -; CHECK-NEXT: G_PHI %{{.*}}(s32), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]], %{{.*}}(s32), +; CHECK: [[PHI_BLOCK]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: G_PHI %{{.*}}(s32), %[[NOTCASE57_BLOCK:bb.[0-9]+]], %{{.*}}(s32), ; define i32 @test_cfg_remap(i32 %in) { entry: @@ -230,7 +230,7 @@ } ; CHECK-LABEL: name: test_cfg_remap_multiple_preds -; CHECK: G_PHI [[ENTRY:%.*]](s32), %bb.{{[0-9]+}}.entry, [[ENTRY]](s32), %bb.{{[0-9]+}}.entry +; CHECK: G_PHI [[ENTRY:%.*]](s32), %bb.{{[0-9]+}}, [[ENTRY]](s32), %bb.{{[0-9]+}} define i32 @test_cfg_remap_multiple_preds(i32 %in) { entry: switch i32 %in, label %odd [i32 1, label %next @@ -256,19 +256,19 @@ ; CHECK: body: ; ; ABI/constant lowering and IR-level entry basic block. -; CHECK: {{bb.[0-9]+.entry}}: +; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}: ; Make sure we have one successor -; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+.L1]](0x80000000) +; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+]](0x80000000) ; CHECK-NOT: G_BR ; ; Check basic block L1 has 2 successors: BBL1 and BBL2 -; CHECK: [[BB_L1]] (address-taken): +; CHECK: [[BB_L1]].{{[a-zA-Z0-9.]+}} (address-taken): ; CHECK-NEXT: successors: %[[BB_L1]](0x40000000), -; CHECK: %[[BB_L2:bb.[0-9]+.L2]](0x40000000) +; CHECK: %[[BB_L2:bb.[0-9]+]](0x40000000) ; CHECK: G_BRINDIRECT %{{[0-9]+}}(p0) ; ; Check basic block L2 is the return basic block -; CHECK: [[BB_L2]] (address-taken): +; CHECK: [[BB_L2]].{{[a-zA-Z0-9.]+}} (address-taken): ; CHECK-NEXT: RET_ReallyLR @indirectbr.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@indirectbr, %L1), i8* blockaddress(@indirectbr, %L2), i8* null], align 8 @@ -410,11 +410,11 @@ ; CHECK-LABEL: name: trivial_bitcast_with_copy ; CHECK: [[A:%[0-9]+]]:_(p0) = COPY %x0 -; CHECK: G_BR %[[CAST:bb\.[0-9]+.cast]] +; CHECK: G_BR %[[CAST:bb\.[0-9]+]] -; CHECK: [[END:bb\.[0-9]+.end]]: +; CHECK: [[END:bb\.[0-9]+]].{{[a-zA-Z0-9.]+}}: -; CHECK: [[CAST]]: +; CHECK: [[CAST]].{{[a-zA-Z0-9.]+}}: ; CHECK: {{%[0-9]+}}:_(p0) = COPY [[A]] ; CHECK: G_BR %[[END]] define i64* @trivial_bitcast_with_copy(i8* %a) { @@ -512,13 +512,13 @@ } ; CHECK-LABEL: name: test_phi -; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+.true]] -; CHECK: G_BR %[[FALSE:bb\.[0-9]+.false]] +; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+]] +; CHECK: G_BR %[[FALSE:bb\.[0-9]+]] -; CHECK: [[TRUE]]: +; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}: ; CHECK: [[RES1:%[0-9]+]]:_(s32) = G_LOAD -; CHECK: [[FALSE]]: +; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}: ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_LOAD ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]] @@ -554,7 +554,7 @@ ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 
-; CHECK: {{bb.[0-9]+}}.next: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: ; CHECK: [[SUM1:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]] ; CHECK: [[SUM2:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]] ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ADD [[SUM1]], [[SUM2]] @@ -1226,7 +1226,7 @@ ; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}): ; CHECK: [[VAL_INT:%[0-9]+]]:_(s32) = G_CONSTANT i32 42 ; CHECK: [[VAL:%[0-9]+]]:_(p0) = G_INTTOPTR [[VAL_INT]](s32) -; CHECK: {{bb.[0-9]+}}.next: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: br label %next next: Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll +++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll @@ -9,7 +9,7 @@ ; CHECK-LABEL: name: bar ; CHECK: body: ; CHECK-NEXT: bb.1 (%ir-block.0): -; CHECK: successors: %[[GOOD:bb.[0-9]+.continue]]{{.*}}%[[BAD:bb.[0-9]+.broken]] +; CHECK: successors: %[[GOOD:bb.[0-9]+]]{{.*}}%[[BAD:bb.[0-9]+]] ; CHECK: EH_LABEL ; CHECK: %w0 = COPY ; CHECK: BL @foo, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit-def %w0 @@ -17,7 +17,7 @@ ; CHECK: EH_LABEL ; CHECK: G_BR %[[GOOD]] -; CHECK: [[BAD]] (landing-pad): +; CHECK: [[BAD]].{{[a-z]+}} (landing-pad): ; CHECK: EH_LABEL ; CHECK: [[UNDEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF ; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0 @@ -30,7 +30,7 @@ ; CHECK: %x0 = COPY [[PTR_RET]] ; CHECK: %w1 = COPY [[SEL_RET]] -; CHECK: [[GOOD]]: +; CHECK: [[GOOD]].{{[a-z]+}}: ; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: {{%[0-9]+}}:_(s128) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 64 Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll +++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll @@ -10,9 +10,9 @@ ; CHECK: name: bar ; CHECK: body: ; CHECK-NEXT: bb.1 (%ir-block.0): -; CHECK: successors: %{{bb.[0-9]+.continue.*}}%[[LP:bb.[0-9]+.cleanup]] +; CHECK: successors: %{{bb.[0-9]+.*}}%[[LP:bb.[0-9]+]] -; CHECK: [[LP]] (landing-pad): +; CHECK: [[LP]].{{[a-z]+}} (landing-pad): ; CHECK: EH_LABEL ; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0 Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir =================================================================== --- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir +++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir @@ -43,16 +43,16 @@ - { id: 16, class: _ } body: | ; CHECK-LABEL: name: test_simple - ; CHECK: bb.0.entry: - ; CHECK: successors: %bb.1.next(0x80000000) + ; CHECK: bb.0.{{[a-zA-Z0-9]+}}: + ; CHECK: successors: %bb.1(0x80000000) ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64) ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[INTTOPTR]](p0) ; CHECK: %x0 = COPY [[PTRTOINT]](s64) - ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1.next - ; CHECK: bb.1.next: + ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1 + ; CHECK: bb.1.{{[a-zA-Z0-9]+}}: ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC2]], [[TRUNC3]] @@ -95,7 +95,7 @@ %6(s64) = G_PTRTOINT %5 %x0 = COPY 
%6 - G_BRCOND %1, %bb.1.next + G_BRCOND %1, %bb.1 bb.1.next: Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir =================================================================== --- llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir +++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir @@ -59,19 +59,19 @@ # CHECK: %5:fpr(s32) = G_FCONSTANT float 2.000000e+00 # Second block will get the constant 1.0 when the localizer is enabled. -# CHECK: bb.1.true: +# CHECK: bb.1.{{[a-zA-Z0-9]+}}: # OPT-NOT: G_FCONSTANT # OPTNONE: [[FONE:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00 -# CHECK: G_BR %bb.3.end +# CHECK: G_BR %bb.3 # Thrid block will get the constant 2.0 when the localizer is enabled. -# CHECK: bb.2.false: +# CHECK: bb.2.{{[a-zA-Z0-9]+}}: # OPT-NOT: G_FCONSTANT # OPTNONE: [[FTWO:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 2.000000e+00 # CHECK: bb.3.end -# OPTNONE: %2:fpr(s32) = PHI [[FONE]](s32), %bb.1.true, [[FTWO]](s32), %bb.2.false -# OPT: %2:fpr(s32) = PHI %4(s32), %bb.1.true, %5(s32), %bb.2.false +# OPTNONE: %2:fpr(s32) = PHI [[FONE]](s32), %bb.1, [[FTWO]](s32), %bb.2 +# OPT: %2:fpr(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2 # CHECK-NEXT: G_FADD %0, %2 body: | bb.0 (%ir-block.0): @@ -82,16 +82,16 @@ %1(s1) = G_TRUNC %6 %4(s32) = G_FCONSTANT float 1.000000e+00 %5(s32) = G_FCONSTANT float 2.000000e+00 - G_BRCOND %1(s1), %bb.1.true - G_BR %bb.2.false + G_BRCOND %1(s1), %bb.1 + G_BR %bb.2 bb.1.true: - G_BR %bb.3.end + G_BR %bb.3 bb.2.false: bb.3.end: - %2(s32) = PHI %4(s32), %bb.1.true, %5(s32), %bb.2.false + %2(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2 %3(s32) = G_FADD %0, %2 %s0 = COPY %3(s32) RET_ReallyLR implicit %s0 Index: llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll +++ llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll @@ -508,12 +508,12 @@ ; CHECK: ldr ; CHECK-NEXT: nop ; CHECK-NEXT: .Ltmp -; CHECK-NEXT: BB +; CHECK-NEXT: %bb. ; CHECK-NEXT: madd ; CHECK-NOWORKAROUND-LABEL: fall_through ; CHECK-NOWORKAROUND: ldr ; CHECK-NOWORKAROUND-NEXT: .Ltmp -; CHECK-NOWORKAROUND-NEXT: BB +; CHECK-NOWORKAROUND-NEXT: %bb. 
; CHECK-NOWORKAROUND-NEXT: madd ; No checks for this, just check it doesn't crash Index: llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll +++ llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll @@ -2,7 +2,7 @@ ; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=machine-scheduler -aarch64-enable-stp-suppress=false -o - 2>&1 > /dev/null | FileCheck %s ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: stp_i64_scale:BB#0 +; CHECK-LABEL: stp_i64_scale:%bb.0 ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(4): STRXui %1, %0, 1 @@ -23,7 +23,7 @@ } ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: stp_i32_scale:BB#0 +; CHECK-LABEL: stp_i32_scale:%bb.0 ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(4): STRWui %1, %0, 1 @@ -44,7 +44,7 @@ } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_i64_unscale:BB#0 entry +; CHECK-LABEL:stp_i64_unscale:%bb.0 entry ; CHECK:Cluster ld/st SU(5) - SU(2) ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:SU(5): STURXi %1, %0, -32 @@ -65,7 +65,7 @@ } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_i32_unscale:BB#0 entry +; CHECK-LABEL:stp_i32_unscale:%bb.0 entry ; CHECK:Cluster ld/st SU(5) - SU(2) ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:SU(5): STURWi %1, %0, -16 @@ -86,7 +86,7 @@ } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_double:BB#0 +; CHECK-LABEL:stp_double:%bb.0 ; CHECK:Cluster ld/st SU(3) - SU(4) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(3): STRDui %1, %0, 1 @@ -107,7 +107,7 @@ } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_float:BB#0 +; CHECK-LABEL:stp_float:%bb.0 ; CHECK:Cluster ld/st SU(3) - SU(4) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(3): STRSui %1, %0, 1 @@ -128,7 +128,7 @@ } ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: stp_volatile:BB#0 +; CHECK-LABEL: stp_volatile:%bb.0 ; CHECK-NOT: Cluster ld/st ; CHECK:SU(2): STRXui %1, %0, 3; mem:Volatile ; CHECK:SU(3): STRXui %1, %0, 2; mem:Volatile Index: llvm/trunk/test/CodeGen/AArch64/analyze-branch.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/analyze-branch.ll +++ llvm/trunk/test/CodeGen/AArch64/analyze-branch.ll @@ -18,7 +18,7 @@ ; CHECK: cmp {{w[0-9]+}}, #42 ; CHECK: b.ne [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -41,7 +41,7 @@ ; CHECK: cmp {{w[0-9]+}}, #42 ; CHECK: b.eq [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -62,7 +62,7 @@ br i1 %tst, label %true, label %false, !prof !0 ; CHECK: cbnz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -83,7 +83,7 @@ br i1 %tst, label %true, label %false, !prof !1 ; CHECK: cbz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -104,7 +104,7 @@ br i1 %tst, label %true, label %false, !prof !0 ; CHECK: cbz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. 
; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -125,7 +125,7 @@ br i1 %tst, label %true, label %false, !prof !1 ; CHECK: cbnz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -147,7 +147,7 @@ br i1 %tst, label %true, label %false, !prof !0 ; CHECK: tbnz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -169,7 +169,7 @@ br i1 %tst, label %true, label %false, !prof !1 ; CHECK: tbz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -192,7 +192,7 @@ br i1 %tst, label %true, label %false, !prof !0 ; CHECK: tbz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -214,7 +214,7 @@ br i1 %tst, label %true, label %false, !prof !1 ; CHECK: tbnz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: Index: llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll @@ -132,6 +132,7 @@ ; Floating point compare. ; CHECK: single_fcmp +; CHECK: ; %bb. ; CHECK: cmp ; CHECK-NOT: b. ; CHECK: fccmp {{.*}}, #8, ge @@ -448,7 +449,7 @@ ; Test the IR CCs that expand to two cond codes. ; CHECK-LABEL: select_and_olt_one: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #4, mi ; CHECK-NEXT: fccmp d2, d3, #1, ne @@ -463,7 +464,7 @@ } ; CHECK-LABEL: select_and_one_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #1, ne ; CHECK-NEXT: fccmp d2, d3, #0, vc @@ -478,7 +479,7 @@ } ; CHECK-LABEL: select_and_olt_ueq: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #0, mi ; CHECK-NEXT: fccmp d2, d3, #8, le @@ -493,7 +494,7 @@ } ; CHECK-LABEL: select_and_ueq_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #8, le ; CHECK-NEXT: fccmp d2, d3, #0, pl @@ -508,7 +509,7 @@ } ; CHECK-LABEL: select_or_olt_one: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #0, pl ; CHECK-NEXT: fccmp d2, d3, #8, le @@ -523,7 +524,7 @@ } ; CHECK-LABEL: select_or_one_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #1, ne ; CHECK-NEXT: fccmp d2, d3, #8, vs @@ -538,7 +539,7 @@ } ; CHECK-LABEL: select_or_olt_ueq: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #4, pl ; CHECK-NEXT: fccmp d2, d3, #1, ne @@ -553,7 +554,7 @@ } ; CHECK-LABEL: select_or_ueq_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #8, le ; CHECK-NEXT: fccmp d2, d3, #8, mi @@ -568,7 +569,7 @@ } ; CHECK-LABEL: select_or_olt_ogt_ueq: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #0, pl ; CHECK-NEXT: fccmp d4, d5, #4, le @@ -586,7 +587,7 @@ } ; CHECK-LABEL: select_or_olt_ueq_ogt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #4, pl ; CHECK-NEXT: 
fccmp d2, d3, #1, ne @@ -606,7 +607,7 @@ ; Verify that we correctly promote f16. ; CHECK-LABEL: half_select_and_olt_oge: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-DAG: fcvt [[S0:s[0-9]+]], h0 ; CHECK-DAG: fcvt [[S1:s[0-9]+]], h1 ; CHECK-NEXT: fcmp [[S0]], [[S1]] @@ -624,7 +625,7 @@ } ; CHECK-LABEL: half_select_and_olt_one: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-DAG: fcvt [[S0:s[0-9]+]], h0 ; CHECK-DAG: fcvt [[S1:s[0-9]+]], h1 ; CHECK-NEXT: fcmp [[S0]], [[S1]] Index: llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll @@ -195,7 +195,7 @@ iftrue: ret i32 42 -; CHECK-NEXT: BB# +; CHECK-NEXT: %bb. ; CHECK-NEXT: mov w0, #42 ; CHECK: ret iffalse: @@ -211,7 +211,7 @@ store fp128 %val, fp128* @lhs, align 16 ; CHECK: tst w0, #0x1 ; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB# +; CHECK-NEXT: %bb. ; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b ; CHECK-NEXT: [[IFFALSE]]: ; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs] Index: llvm/trunk/test/CodeGen/AArch64/arm64-icmp-opt.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-icmp-opt.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-icmp-opt.ll @@ -7,7 +7,7 @@ define i32 @t1(i64 %a) { ; CHECK-LABEL: t1: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: lsr x8, x0, #63 ; CHECK-NEXT: eor w0, w8, #0x1 ; CHECK-NEXT: ret Index: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll @@ -6176,7 +6176,7 @@ ; Check for dependencies between the vector and the scalar load. define <4 x float> @test_v4f32_post_reg_ld1lane_dep_vec_on_load(float* %bar, float** %ptr, i64 %inc, <4 x float>* %dep_ptr_1, <4 x float>* %dep_ptr_2, <4 x float> %vec) { ; CHECK-LABEL: test_v4f32_post_reg_ld1lane_dep_vec_on_load: -; CHECK: BB#0: +; CHECK: %bb.0: ; CHECK-NEXT: ldr s[[LD:[0-9]+]], [x0] ; CHECK-NEXT: str q0, [x3] ; CHECK-NEXT: ldr q0, [x4] Index: llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll @@ -4,12 +4,12 @@ ; Test ldr clustering. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldr_int:BB#0 +; CHECK-LABEL: ldr_int:%bb.0 ; CHECK: Cluster ld/st SU(1) - SU(2) ; CHECK: SU(1): %{{[0-9]+}} = LDRWui ; CHECK: SU(2): %{{[0-9]+}} = LDRWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldr_int:BB#0 +; EXYNOS-LABEL: ldr_int:%bb.0 ; EXYNOS: Cluster ld/st SU(1) - SU(2) ; EXYNOS: SU(1): %{{[0-9]+}} = LDRWui ; EXYNOS: SU(2): %{{[0-9]+}} = LDRWui @@ -24,12 +24,12 @@ ; Test ldpsw clustering ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldp_sext_int:BB#0 +; CHECK-LABEL: ldp_sext_int:%bb.0 ; CHECK: Cluster ld/st SU(1) - SU(2) ; CHECK: SU(1): %{{[0-9]+}} = LDRSWui ; CHECK: SU(2): %{{[0-9]+}} = LDRSWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldp_sext_int:BB#0 +; EXYNOS-LABEL: ldp_sext_int:%bb.0 ; EXYNOS: Cluster ld/st SU(1) - SU(2) ; EXYNOS: SU(1): %{{[0-9]+}} = LDRSWui ; EXYNOS: SU(2): %{{[0-9]+}} = LDRSWui @@ -45,12 +45,12 @@ ; Test ldur clustering. 
; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldur_int:BB#0 +; CHECK-LABEL: ldur_int:%bb.0 ; CHECK: Cluster ld/st SU(2) - SU(1) ; CHECK: SU(1): %{{[0-9]+}} = LDURWi ; CHECK: SU(2): %{{[0-9]+}} = LDURWi ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldur_int:BB#0 +; EXYNOS-LABEL: ldur_int:%bb.0 ; EXYNOS: Cluster ld/st SU(2) - SU(1) ; EXYNOS: SU(1): %{{[0-9]+}} = LDURWi ; EXYNOS: SU(2): %{{[0-9]+}} = LDURWi @@ -65,12 +65,12 @@ ; Test sext + zext clustering. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldp_half_sext_zext_int:BB#0 +; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0 ; CHECK: Cluster ld/st SU(3) - SU(4) ; CHECK: SU(3): %{{[0-9]+}} = LDRSWui ; CHECK: SU(4): %{{[0-9]+}}:sub_32 = LDRWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0 +; EXYNOS-LABEL: ldp_half_sext_zext_int:%bb.0 ; EXYNOS: Cluster ld/st SU(3) - SU(4) ; EXYNOS: SU(3): %{{[0-9]+}} = LDRSWui ; EXYNOS: SU(4): %{{[0-9]+}}:sub_32 = LDRWui @@ -88,12 +88,12 @@ ; Test zext + sext clustering. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldp_half_zext_sext_int:BB#0 +; CHECK-LABEL: ldp_half_zext_sext_int:%bb.0 ; CHECK: Cluster ld/st SU(3) - SU(4) ; CHECK: SU(3): %{{[0-9]+}}:sub_32 = LDRWui ; CHECK: SU(4): %{{[0-9]+}} = LDRSWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0 +; EXYNOS-LABEL: ldp_half_zext_sext_int:%bb.0 ; EXYNOS: Cluster ld/st SU(3) - SU(4) ; EXYNOS: SU(3): %{{[0-9]+}}:sub_32 = LDRWui ; EXYNOS: SU(4): %{{[0-9]+}} = LDRSWui @@ -111,12 +111,12 @@ ; Verify we don't cluster volatile loads. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldr_int_volatile:BB#0 +; CHECK-LABEL: ldr_int_volatile:%bb.0 ; CHECK-NOT: Cluster ld/st ; CHECK: SU(1): %{{[0-9]+}} = LDRWui ; CHECK: SU(2): %{{[0-9]+}} = LDRWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldr_int_volatile:BB#0 +; EXYNOS-LABEL: ldr_int_volatile:%bb.0 ; EXYNOS-NOT: Cluster ld/st ; EXYNOS: SU(1): %{{[0-9]+}} = LDRWui ; EXYNOS: SU(2): %{{[0-9]+}} = LDRWui @@ -131,12 +131,12 @@ ; Test ldq clustering (no clustering for Exynos). ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldq_cluster:BB#0 +; CHECK-LABEL: ldq_cluster:%bb.0 ; CHECK: Cluster ld/st SU(1) - SU(3) ; CHECK: SU(1): %{{[0-9]+}} = LDRQui ; CHECK: SU(3): %{{[0-9]+}} = LDRQui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldq_cluster:BB#0 +; EXYNOS-LABEL: ldq_cluster:%bb.0 ; EXYNOS-NOT: Cluster ld/st define <2 x i64> @ldq_cluster(i64* %p) { %a1 = bitcast i64* %p to <2 x i64>* Index: llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll @@ -8,7 +8,7 @@ ; ; CHECK: ********** MI Scheduling ********** ; CHECK: main -; CHECK: *** Final schedule for BB#2 *** +; CHECK: *** Final schedule for %bb.2 *** ; CHECK: MADDWrrr ; CHECK: ADDWri ; CHECK: ********** INTERVALS ********** @@ -83,8 +83,8 @@ ; after it, this test checks to make sure there are more than one. ; ; CHECK: ********** MI Scheduling ********** -; CHECK: neon4xfloat:BB#0 -; CHECK: *** Final schedule for BB#0 *** +; CHECK: neon4xfloat:%bb.0 +; CHECK: *** Final schedule for %bb.0 *** ; CHECK: FDIVv4f32 ; CHECK: FADDv4f32 ; CHECK: FADDv4f32 @@ -130,7 +130,7 @@ ; are otherwise ready are jammed in the pending queue. 
; CHECK: ********** MI Scheduling ********** ; CHECK: testResourceConflict -; CHECK: *** Final schedule for BB#0 *** +; CHECK: *** Final schedule for %bb.0 *** ; CHECK: BRK ; CHECK: ********** INTERVALS ********** define void @testResourceConflict(float* %ptr) { @@ -178,7 +178,7 @@ ; Resource contention on LDST. ; CHECK: ********** MI Scheduling ********** ; CHECK: testLdStConflict -; CHECK: *** Final schedule for BB#1 *** +; CHECK: *** Final schedule for %bb.1 *** ; CHECK: LD4Fourv2d ; CHECK: STRQui ; CHECK: ********** INTERVALS ********** Index: llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -8,10 +8,10 @@ ; ; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s ; CHECK: ********** MI Scheduling ********** -; CHECK: main:BB#2 +; CHECK: main:%bb.2 ; CHECK: LDR ; CHECK: Latency : 4 -; CHECK: *** Final schedule for BB#2 *** +; CHECK: *** Final schedule for %bb.2 *** ; CHECK: LDR ; CHECK: LDR ; CHECK-NOT: LDR Index: llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll @@ -4,7 +4,7 @@ ; Test for bug in misched memory dependency calculation. ; ; CHECK: ********** MI Scheduling ********** -; CHECK: misched_bug:BB#0 entry +; CHECK: misched_bug:%bb.0 entry ; CHECK: SU(2): %2 = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0 ; CHECK: Successors: ; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2 Index: llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll @@ -113,7 +113,7 @@ define void @test_va_end() nounwind { ; CHECK-LABEL: test_va_end: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 %addr = bitcast %va_list* @var to i8* call void @llvm.va_end(i8* %addr) Index: llvm/trunk/test/CodeGen/AArch64/bics.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/bics.ll +++ llvm/trunk/test/CodeGen/AArch64/bics.ll @@ -2,7 +2,7 @@ define i1 @andn_cmp(i32 %x, i32 %y) { ; CHECK-LABEL: andn_cmp: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bics wzr, w1, w0 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret @@ -15,7 +15,7 @@ define i1 @and_cmp(i32 %x, i32 %y) { ; CHECK-LABEL: and_cmp: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bics wzr, w1, w0 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret @@ -27,7 +27,7 @@ define i1 @and_cmp_const(i32 %x) { ; CHECK-LABEL: and_cmp_const: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #43 ; CHECK-NEXT: bics wzr, w8, w0 ; CHECK-NEXT: cset w0, eq Index: llvm/trunk/test/CodeGen/AArch64/branch-relax-cbz.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/branch-relax-cbz.ll +++ llvm/trunk/test/CodeGen/AArch64/branch-relax-cbz.ll @@ -4,7 +4,7 @@ ; CHECK: cmn x{{[0-9]+}}, #5 ; CHECK-NEXT: b.le [[B2:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#1: ; %b3 +; CHECK-NEXT: ; %bb.1: ; %b3 ; CHECK: ldr [[LOAD:w[0-9]+]] ; CHECK: cbnz [[LOAD]], [[B8:LBB[0-9]+_[0-9]+]] ; CHECK-NEXT: b 
[[B7:LBB[0-9]+_[0-9]+]] Index: llvm/trunk/test/CodeGen/AArch64/fast-isel-assume.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/fast-isel-assume.ll +++ llvm/trunk/test/CodeGen/AArch64/fast-isel-assume.ll @@ -3,7 +3,7 @@ ; Check that we ignore the assume intrinsic. ; CHECK-LABEL: test: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: ret define void @test(i32 %a) { %tmp0 = icmp slt i32 %a, 0 Index: llvm/trunk/test/CodeGen/AArch64/fast-isel-atomic.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/fast-isel-atomic.ll +++ llvm/trunk/test/CodeGen/AArch64/fast-isel-atomic.ll @@ -5,7 +5,7 @@ ; currently match, so we might as well check both! Feel free to remove SDAG. ; CHECK-LABEL: atomic_store_monotonic_8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strb w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_8(i8* %p, i8 %val) #0 { @@ -14,7 +14,7 @@ } ; CHECK-LABEL: atomic_store_monotonic_8_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strb w1, [x0, #1] ; CHECK-NEXT: ret define void @atomic_store_monotonic_8_off(i8* %p, i8 %val) #0 { @@ -24,7 +24,7 @@ } ; CHECK-LABEL: atomic_store_monotonic_16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strh w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_16(i16* %p, i16 %val) #0 { @@ -33,7 +33,7 @@ } ; CHECK-LABEL: atomic_store_monotonic_16_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strh w1, [x0, #2] ; CHECK-NEXT: ret define void @atomic_store_monotonic_16_off(i16* %p, i16 %val) #0 { @@ -43,7 +43,7 @@ } ; CHECK-LABEL: atomic_store_monotonic_32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_32(i32* %p, i32 %val) #0 { @@ -52,7 +52,7 @@ } ; CHECK-LABEL: atomic_store_monotonic_32_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str w1, [x0, #4] ; CHECK-NEXT: ret define void @atomic_store_monotonic_32_off(i32* %p, i32 %val) #0 { @@ -62,7 +62,7 @@ } ; CHECK-LABEL: atomic_store_monotonic_64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str x1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_64(i64* %p, i64 %val) #0 { @@ -71,7 +71,7 @@ } ; CHECK-LABEL: atomic_store_monotonic_64_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str x1, [x0, #8] ; CHECK-NEXT: ret define void @atomic_store_monotonic_64_off(i64* %p, i64 %val) #0 { @@ -81,7 +81,7 @@ } ; CHECK-LABEL: atomic_store_release_8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_release_8(i8* %p, i8 %val) #0 { @@ -90,7 +90,7 @@ } ; CHECK-LABEL: atomic_store_release_8_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #1 ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret @@ -101,7 +101,7 @@ } ; CHECK-LABEL: atomic_store_release_16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_release_16(i16* %p, i16 %val) #0 { @@ -110,7 +110,7 @@ } ; CHECK-LABEL: atomic_store_release_16_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #2 ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret @@ -121,7 +121,7 @@ } ; CHECK-LABEL: atomic_store_release_32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret define void 
@atomic_store_release_32(i32* %p, i32 %val) #0 { @@ -130,7 +130,7 @@ } ; CHECK-LABEL: atomic_store_release_32_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #4 ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret @@ -141,7 +141,7 @@ } ; CHECK-LABEL: atomic_store_release_64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret define void @atomic_store_release_64(i64* %p, i64 %val) #0 { @@ -150,7 +150,7 @@ } ; CHECK-LABEL: atomic_store_release_64_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #8 ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret @@ -162,7 +162,7 @@ ; CHECK-LABEL: atomic_store_seq_cst_8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_8(i8* %p, i8 %val) #0 { @@ -171,7 +171,7 @@ } ; CHECK-LABEL: atomic_store_seq_cst_8_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #1 ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret @@ -182,7 +182,7 @@ } ; CHECK-LABEL: atomic_store_seq_cst_16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_16(i16* %p, i16 %val) #0 { @@ -191,7 +191,7 @@ } ; CHECK-LABEL: atomic_store_seq_cst_16_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #2 ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ } ; CHECK-LABEL: atomic_store_seq_cst_32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_32(i32* %p, i32 %val) #0 { @@ -211,7 +211,7 @@ } ; CHECK-LABEL: atomic_store_seq_cst_32_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #4 ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret @@ -222,7 +222,7 @@ } ; CHECK-LABEL: atomic_store_seq_cst_64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_64(i64* %p, i64 %val) #0 { @@ -231,7 +231,7 @@ } ; CHECK-LABEL: atomic_store_seq_cst_64_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #8 ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret Index: llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll +++ llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll @@ -8,9 +8,9 @@ define <2 x i32> @icmp_v2i32(<2 x i32> %a) { ; CHECK-LABEL: icmp_v2i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: cmeq.2s [[CMP:v[0-9]+]], v0, #0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret @@ -23,9 +23,9 @@ define <2 x i32> @icmp_constfold_v2i32(<2 x i32> %a) { ; CHECK-LABEL: icmp_constfold_v2i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b v0, v[[CMP]], [[MASK]] ; CHECK-NEXT: ret @@ -38,10 +38,10 @@ define <4 x i32> @icmp_v4i32(<4 x i32> %a) { ; CHECK-LABEL: icmp_v4i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: cmeq.4s [[CMP:v[0-9]+]], v0, #0 ; CHECK-NEXT: xtn.4h [[CMPV4I16:v[0-9]+]], [[CMP]] -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], 
[[CMPV4I16]], [[MASK]] ; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0 @@ -55,9 +55,9 @@ define <4 x i32> @icmp_constfold_v4i32(<4 x i32> %a) { ; CHECK-LABEL: icmp_constfold_v4i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], v[[CMP]], [[MASK]] ; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0 @@ -71,9 +71,9 @@ define <16 x i8> @icmp_v16i8(<16 x i8> %a) { ; CHECK-LABEL: icmp_v16i8: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: cmeq.16b [[CMP:v[0-9]+]], v0, #0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret @@ -86,9 +86,9 @@ define <16 x i8> @icmp_constfold_v16i8(<16 x i8> %a) { ; CHECK-LABEL: icmp_constfold_v16i8: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: movi.2d [[CMP:v[0-9]+]], #0xffffffffffffffff -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret Index: llvm/trunk/test/CodeGen/AArch64/fast-isel-cmpxchg.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/fast-isel-cmpxchg.ll +++ llvm/trunk/test/CodeGen/AArch64/fast-isel-cmpxchg.ll @@ -6,7 +6,7 @@ ; CHECK-NEXT: ldaxr [[OLD:w[0-9]+]], [x0] ; CHECK-NEXT: cmp [[OLD]], w1 ; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]] -; CHECK-NEXT: // BB#2: +; CHECK-NEXT: // %bb.2: ; CHECK-NEXT: stlxr [[STATUS]], w2, [x0] ; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]] ; CHECK-NEXT: [[DONE]]: @@ -25,14 +25,14 @@ } ; CHECK-LABEL: cmpxchg_acq_rel_32_load: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK: ldr [[NEW:w[0-9]+]], [x2] ; CHECK-NEXT: [[RETRY:.LBB[0-9_]+]]: ; CHECK-NEXT: mov [[STATUS:w[0-9]+]], #0 ; CHECK-NEXT: ldaxr [[OLD:w[0-9]+]], [x0] ; CHECK-NEXT: cmp [[OLD]], w1 ; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]] -; CHECK-NEXT: // BB#2: +; CHECK-NEXT: // %bb.2: ; CHECK-NEXT: stlxr [[STATUS]], [[NEW]], [x0] ; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]] ; CHECK-NEXT: [[DONE]]: @@ -57,7 +57,7 @@ ; CHECK-NEXT: ldaxr [[OLD:x[0-9]+]], [x0] ; CHECK-NEXT: cmp [[OLD]], x1 ; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]] -; CHECK-NEXT: // BB#2: +; CHECK-NEXT: // %bb.2: ; CHECK-NEXT: stlxr [[STATUS]], x2, [x0] ; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]] ; CHECK-NEXT: [[DONE]]: Index: llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll +++ llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll @@ -152,7 +152,7 @@ define double @bitcast_fabs(double %x) { ; CHECK-LABEL: bitcast_fabs: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: fabs d0, d0 ; CHECK-NEXT: ret ; @@ -164,7 +164,7 @@ define float @bitcast_fneg(float %x) { ; CHECK-LABEL: bitcast_fneg: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: fneg s0, s0 ; CHECK-NEXT: ret ; Index: llvm/trunk/test/CodeGen/AArch64/local_vars.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/local_vars.ll +++ llvm/trunk/test/CodeGen/AArch64/local_vars.ll @@ -17,7 +17,7 @@ define void @trivial_func() nounwind { ; CHECK-LABEL: trivial_func: // @trivial_func -; CHECK-NEXT: // BB#0 +; CHECK-NEXT: // %bb.0 ; CHECK-NEXT: ret ret void Index: llvm/trunk/test/CodeGen/AArch64/max-jump-table.ll =================================================================== --- 
llvm/trunk/test/CodeGen/AArch64/max-jump-table.ll +++ llvm/trunk/test/CodeGen/AArch64/max-jump-table.ll @@ -77,10 +77,10 @@ ] ; CHECK-LABEL: function jt2: ; CHECK-NEXT: Jump Tables: -; CHECK0-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#5 BB#6{{$}} -; CHECK4-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}} -; CHECK8-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}} -; CHECKM1-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}} +; CHECK0-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.5 %bb.6{{$}} +; CHECK4-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}} +; CHECK8-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}} +; CHECKM1-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}} ; CHEC-NEXT: Function Live Ins: bb1: tail call void @ext(i32 1) br label %return Index: llvm/trunk/test/CodeGen/AArch64/neon-bitcast.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/neon-bitcast.ll +++ llvm/trunk/test/CodeGen/AArch64/neon-bitcast.ll @@ -4,7 +4,7 @@ define <1 x i64> @test_v8i8_to_v1i64(<8 x i8> %in) nounwind { ; CHECK: test_v8i8_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <1 x i64> @@ -13,7 +13,7 @@ define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind { ; CHECK: test_v8i8_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <2 x i32> @@ -22,7 +22,7 @@ define <2 x float> @test_v8i8_to_v2f32(<8 x i8> %in) nounwind{ ; CHECK: test_v8i8_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <2 x float> @@ -31,7 +31,7 @@ define <4 x i16> @test_v8i8_to_v4i16(<8 x i8> %in) nounwind{ ; CHECK: test_v8i8_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <4 x i16> @@ -40,7 +40,7 @@ define <8 x i8> @test_v8i8_to_v8i8(<8 x i8> %in) nounwind{ ; CHECK: test_v8i8_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <8 x i8> @@ -51,7 +51,7 @@ define <1 x i64> @test_v4i16_to_v1i64(<4 x i16> %in) nounwind { ; CHECK: test_v4i16_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <1 x i64> @@ -60,7 +60,7 @@ define <2 x i32> @test_v4i16_to_v2i32(<4 x i16> %in) nounwind { ; CHECK: test_v4i16_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <2 x i32> @@ -69,7 +69,7 @@ define <2 x float> @test_v4i16_to_v2f32(<4 x i16> %in) nounwind{ ; CHECK: test_v4i16_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <2 x float> @@ -78,7 +78,7 @@ define <4 x i16> @test_v4i16_to_v4i16(<4 x i16> %in) nounwind{ ; CHECK: test_v4i16_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <4 x i16> @@ -87,7 +87,7 @@ define <8 x i8> @test_v4i16_to_v8i8(<4 x i16> %in) nounwind{ ; CHECK: test_v4i16_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <8 x i8> @@ -98,7 +98,7 @@ define <1 x i64> @test_v2i32_to_v1i64(<2 x i32> %in) nounwind { ; CHECK: test_v2i32_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <1 x i64> @@ -107,7 +107,7 @@ define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind { ; CHECK: test_v2i32_to_v2i32: -; CHECK-NEXT: // BB#0: +; 
CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <2 x i32> @@ -116,7 +116,7 @@ define <2 x float> @test_v2i32_to_v2f32(<2 x i32> %in) nounwind{ ; CHECK: test_v2i32_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <2 x float> @@ -125,7 +125,7 @@ define <4 x i16> @test_v2i32_to_v4i16(<2 x i32> %in) nounwind{ ; CHECK: test_v2i32_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <4 x i16> @@ -134,7 +134,7 @@ define <8 x i8> @test_v2i32_to_v8i8(<2 x i32> %in) nounwind{ ; CHECK: test_v2i32_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <8 x i8> @@ -145,7 +145,7 @@ define <1 x i64> @test_v2f32_to_v1i64(<2 x float> %in) nounwind { ; CHECK: test_v2f32_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <1 x i64> @@ -154,7 +154,7 @@ define <2 x i32> @test_v2f32_to_v2i32(<2 x float> %in) nounwind { ; CHECK: test_v2f32_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <2 x i32> @@ -163,7 +163,7 @@ define <2 x float> @test_v2f32_to_v2f32(<2 x float> %in) nounwind{ ; CHECK: test_v2f32_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <2 x float> @@ -172,7 +172,7 @@ define <4 x i16> @test_v2f32_to_v4i16(<2 x float> %in) nounwind{ ; CHECK: test_v2f32_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <4 x i16> @@ -181,7 +181,7 @@ define <8 x i8> @test_v2f32_to_v8i8(<2 x float> %in) nounwind{ ; CHECK: test_v2f32_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <8 x i8> @@ -192,7 +192,7 @@ define <1 x i64> @test_v1i64_to_v1i64(<1 x i64> %in) nounwind { ; CHECK: test_v1i64_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <1 x i64> @@ -201,7 +201,7 @@ define <2 x i32> @test_v1i64_to_v2i32(<1 x i64> %in) nounwind { ; CHECK: test_v1i64_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <2 x i32> @@ -210,7 +210,7 @@ define <2 x float> @test_v1i64_to_v2f32(<1 x i64> %in) nounwind{ ; CHECK: test_v1i64_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <2 x float> @@ -219,7 +219,7 @@ define <4 x i16> @test_v1i64_to_v4i16(<1 x i64> %in) nounwind{ ; CHECK: test_v1i64_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <4 x i16> @@ -228,7 +228,7 @@ define <8 x i8> @test_v1i64_to_v8i8(<1 x i64> %in) nounwind{ ; CHECK: test_v1i64_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <8 x i8> @@ -240,7 +240,7 @@ define <2 x double> @test_v16i8_to_v2f64(<16 x i8> %in) nounwind { ; CHECK: test_v16i8_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <2 x double> @@ -249,7 +249,7 @@ define <2 x i64> @test_v16i8_to_v2i64(<16 x i8> %in) nounwind { ; CHECK: test_v16i8_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <2 x i64> @@ -258,7 +258,7 @@ define <4 x i32> @test_v16i8_to_v4i32(<16 x i8> %in) nounwind { ; CHECK: 
test_v16i8_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <4 x i32> @@ -267,7 +267,7 @@ define <4 x float> @test_v16i8_to_v2f32(<16 x i8> %in) nounwind{ ; CHECK: test_v16i8_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <4 x float> @@ -276,7 +276,7 @@ define <8 x i16> @test_v16i8_to_v8i16(<16 x i8> %in) nounwind{ ; CHECK: test_v16i8_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <8 x i16> @@ -285,7 +285,7 @@ define <16 x i8> @test_v16i8_to_v16i8(<16 x i8> %in) nounwind{ ; CHECK: test_v16i8_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <16 x i8> @@ -296,7 +296,7 @@ define <2 x double> @test_v8i16_to_v2f64(<8 x i16> %in) nounwind { ; CHECK: test_v8i16_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <2 x double> @@ -305,7 +305,7 @@ define <2 x i64> @test_v8i16_to_v2i64(<8 x i16> %in) nounwind { ; CHECK: test_v8i16_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <2 x i64> @@ -314,7 +314,7 @@ define <4 x i32> @test_v8i16_to_v4i32(<8 x i16> %in) nounwind { ; CHECK: test_v8i16_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <4 x i32> @@ -323,7 +323,7 @@ define <4 x float> @test_v8i16_to_v2f32(<8 x i16> %in) nounwind{ ; CHECK: test_v8i16_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <4 x float> @@ -332,7 +332,7 @@ define <8 x i16> @test_v8i16_to_v8i16(<8 x i16> %in) nounwind{ ; CHECK: test_v8i16_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <8 x i16> @@ -341,7 +341,7 @@ define <16 x i8> @test_v8i16_to_v16i8(<8 x i16> %in) nounwind{ ; CHECK: test_v8i16_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <16 x i8> @@ -352,7 +352,7 @@ define <2 x double> @test_v4i32_to_v2f64(<4 x i32> %in) nounwind { ; CHECK: test_v4i32_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <2 x double> @@ -361,7 +361,7 @@ define <2 x i64> @test_v4i32_to_v2i64(<4 x i32> %in) nounwind { ; CHECK: test_v4i32_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <2 x i64> @@ -370,7 +370,7 @@ define <4 x i32> @test_v4i32_to_v4i32(<4 x i32> %in) nounwind { ; CHECK: test_v4i32_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <4 x i32> @@ -379,7 +379,7 @@ define <4 x float> @test_v4i32_to_v2f32(<4 x i32> %in) nounwind{ ; CHECK: test_v4i32_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <4 x float> @@ -388,7 +388,7 @@ define <8 x i16> @test_v4i32_to_v8i16(<4 x i32> %in) nounwind{ ; CHECK: test_v4i32_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <8 x i16> @@ -397,7 +397,7 @@ define <16 x i8> @test_v4i32_to_v16i8(<4 x i32> %in) nounwind{ ; CHECK: test_v4i32_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <16 x i8> @@ -408,7 +408,7 @@ define <2 x double> @test_v4f32_to_v2f64(<4 
x float> %in) nounwind { ; CHECK: test_v4f32_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <2 x double> @@ -417,7 +417,7 @@ define <2 x i64> @test_v4f32_to_v2i64(<4 x float> %in) nounwind { ; CHECK: test_v4f32_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <2 x i64> @@ -426,7 +426,7 @@ define <4 x i32> @test_v4f32_to_v4i32(<4 x float> %in) nounwind { ; CHECK: test_v4f32_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <4 x i32> @@ -435,7 +435,7 @@ define <4 x float> @test_v4f32_to_v4f32(<4 x float> %in) nounwind{ ; CHECK: test_v4f32_to_v4f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <4 x float> @@ -444,7 +444,7 @@ define <8 x i16> @test_v4f32_to_v8i16(<4 x float> %in) nounwind{ ; CHECK: test_v4f32_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <8 x i16> @@ -453,7 +453,7 @@ define <16 x i8> @test_v4f32_to_v16i8(<4 x float> %in) nounwind{ ; CHECK: test_v4f32_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <16 x i8> @@ -464,7 +464,7 @@ define <2 x double> @test_v2i64_to_v2f64(<2 x i64> %in) nounwind { ; CHECK: test_v2i64_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <2 x double> @@ -473,7 +473,7 @@ define <2 x i64> @test_v2i64_to_v2i64(<2 x i64> %in) nounwind { ; CHECK: test_v2i64_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <2 x i64> @@ -482,7 +482,7 @@ define <4 x i32> @test_v2i64_to_v4i32(<2 x i64> %in) nounwind { ; CHECK: test_v2i64_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <4 x i32> @@ -491,7 +491,7 @@ define <4 x float> @test_v2i64_to_v4f32(<2 x i64> %in) nounwind{ ; CHECK: test_v2i64_to_v4f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <4 x float> @@ -500,7 +500,7 @@ define <8 x i16> @test_v2i64_to_v8i16(<2 x i64> %in) nounwind{ ; CHECK: test_v2i64_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <8 x i16> @@ -509,7 +509,7 @@ define <16 x i8> @test_v2i64_to_v16i8(<2 x i64> %in) nounwind{ ; CHECK: test_v2i64_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <16 x i8> @@ -520,7 +520,7 @@ define <2 x double> @test_v2f64_to_v2f64(<2 x double> %in) nounwind { ; CHECK: test_v2f64_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <2 x double> @@ -529,7 +529,7 @@ define <2 x i64> @test_v2f64_to_v2i64(<2 x double> %in) nounwind { ; CHECK: test_v2f64_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <2 x i64> @@ -538,7 +538,7 @@ define <4 x i32> @test_v2f64_to_v4i32(<2 x double> %in) nounwind { ; CHECK: test_v2f64_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <4 x i32> @@ -547,7 +547,7 @@ define <4 x float> @test_v2f64_to_v4f32(<2 x double> %in) nounwind{ ; CHECK: test_v2f64_to_v4f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x 
double> %in to <4 x float> @@ -556,7 +556,7 @@ define <8 x i16> @test_v2f64_to_v8i16(<2 x double> %in) nounwind{ ; CHECK: test_v2f64_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <8 x i16> @@ -565,7 +565,7 @@ define <16 x i8> @test_v2f64_to_v16i8(<2 x double> %in) nounwind{ ; CHECK: test_v2f64_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <16 x i8> Index: llvm/trunk/test/CodeGen/AArch64/nest-register.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/nest-register.ll +++ llvm/trunk/test/CodeGen/AArch64/nest-register.ll @@ -5,7 +5,7 @@ define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov x0, x18 ; CHECK-NEXT: ret Index: llvm/trunk/test/CodeGen/AArch64/recp-fastmath.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/recp-fastmath.ll +++ llvm/trunk/test/CodeGen/AArch64/recp-fastmath.ll @@ -5,7 +5,7 @@ ret float %div ; CHECK-LABEL: frecp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -15,7 +15,7 @@ ret float %div ; CHECK-LABEL: frecp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:s[0-7]]] ; CHECK-NEXT: frecps {{s[0-7](, s[0-7])?}}, [[R]] ; CHECK: frecps {{s[0-7]}}, {{s[0-7]}}, {{s[0-7]}} @@ -27,7 +27,7 @@ ret <2 x float> %div ; CHECK-LABEL: f2recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -37,7 +37,7 @@ ret <2 x float> %div ; CHECK-LABEL: f2recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.2s]] ; CHECK-NEXT: frecps {{v[0-7]\.2s(, v[0-7].2s)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.2s}}, {{v[0-7]\.2s}}, {{v[0-7]\.2s}} @@ -49,7 +49,7 @@ ret <4 x float> %div ; CHECK-LABEL: f4recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -59,7 +59,7 @@ ret <4 x float> %div ; CHECK-LABEL: f4recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.4s]] ; CHECK-NEXT: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.4s}}, {{v[0-7]\.4s}}, {{v[0-7]\.4s}} @@ -71,7 +71,7 @@ ret <8 x float> %div ; CHECK-LABEL: f8recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv ; CHECK-NEXT: fdiv @@ -82,7 +82,7 @@ ret <8 x float> %div ; CHECK-LABEL: f8recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.4s]] ; CHECK: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, {{v[0-7]\.4s}} @@ -96,7 +96,7 @@ ret double %div ; CHECK-LABEL: drecp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -106,7 +106,7 @@ ret double %div ; CHECK-LABEL: drecp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:d[0-7]]] ; CHECK-NEXT: frecps {{d[0-7](, d[0-7])?}}, [[R]] ; CHECK: frecps {{d[0-7]}}, {{d[0-7]}}, {{d[0-7]}} @@ -119,7 +119,7 @@ ret <2 x double> %div ; CHECK-LABEL: d2recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -129,7 +129,7 @@ ret <2 x double> %div ; CHECK-LABEL: d2recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.2d]] ; CHECK-NEXT: frecps {{v[0-7]\.2d(, v[0-7].2d)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}} @@ -142,7 +142,7 @@ ret <4 x double> 
%div ; CHECK-LABEL: d4recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv ; CHECK-NEXT: fdiv @@ -153,7 +153,7 @@ ret <4 x double> %div ; CHECK-LABEL: d4recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.2d]] ; CHECK: frecps {{v[0-7]\.2d(, v[0-7].2d)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}} Index: llvm/trunk/test/CodeGen/AArch64/selectcc-to-shiftand.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/selectcc-to-shiftand.ll +++ llvm/trunk/test/CodeGen/AArch64/selectcc-to-shiftand.ll @@ -4,7 +4,7 @@ define i32 @neg_sel_constants(i32 %a) { ; CHECK-LABEL: neg_sel_constants: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #5 ; CHECK-NEXT: and w0, w8, w0, asr #31 ; CHECK-NEXT: ret @@ -18,7 +18,7 @@ define i32 @neg_sel_special_constant(i32 %a) { ; CHECK-LABEL: neg_sel_special_constant: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: lsr w8, w0, #22 ; CHECK-NEXT: and w0, w8, #0x200 ; CHECK-NEXT: ret @@ -32,7 +32,7 @@ define i32 @neg_sel_variable_and_zero(i32 %a, i32 %b) { ; CHECK-LABEL: neg_sel_variable_and_zero: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: and w0, w1, w0, asr #31 ; CHECK-NEXT: ret ; @@ -45,7 +45,7 @@ define i32 @not_pos_sel_same_variable(i32 %a) { ; CHECK-LABEL: not_pos_sel_same_variable: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: and w0, w0, w0, asr #31 ; CHECK-NEXT: ret ; @@ -60,7 +60,7 @@ define i32 @pos_sel_constants(i32 %a) { ; CHECK-LABEL: pos_sel_constants: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #5 ; CHECK-NEXT: bic w0, w8, w0, asr #31 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ define i32 @pos_sel_special_constant(i32 %a) { ; CHECK-LABEL: pos_sel_special_constant: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: orr w8, wzr, #0x200 ; CHECK-NEXT: bic w0, w8, w0, lsr #22 ; CHECK-NEXT: ret @@ -88,7 +88,7 @@ define i32 @pos_sel_variable_and_zero(i32 %a, i32 %b) { ; CHECK-LABEL: pos_sel_variable_and_zero: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bic w0, w1, w0, asr #31 ; CHECK-NEXT: ret ; @@ -101,7 +101,7 @@ define i32 @not_neg_sel_same_variable(i32 %a) { ; CHECK-LABEL: not_neg_sel_same_variable: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bic w0, w0, w0, asr #31 ; CHECK-NEXT: ret ; @@ -115,7 +115,7 @@ ; ret = (x-y) > 0 ? x-y : 0 define i32 @PR31175(i32 %x, i32 %y) { ; CHECK-LABEL: PR31175: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w0, w1 ; CHECK-NEXT: bic w0, w8, w8, asr #31 ; CHECK-NEXT: ret Index: llvm/trunk/test/CodeGen/AArch64/sibling-call.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/sibling-call.ll +++ llvm/trunk/test/CodeGen/AArch64/sibling-call.ll @@ -6,7 +6,7 @@ define void @caller_to0_from0() nounwind { ; CHECK-LABEL: caller_to0_from0: -; CHECK-NEXT: // BB +; CHECK-NEXT: // %bb. tail call void @callee_stack0() ret void ; CHECK-NEXT: b callee_stack0 @@ -14,7 +14,7 @@ define void @caller_to0_from8([8 x i32], i64) nounwind{ ; CHECK-LABEL: caller_to0_from8: -; CHECK-NEXT: // BB +; CHECK-NEXT: // %bb. 
tail call void @callee_stack0() ret void Index: llvm/trunk/test/CodeGen/AArch64/sqrt-fastmath.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/sqrt-fastmath.ll +++ llvm/trunk/test/CodeGen/AArch64/sqrt-fastmath.ll @@ -14,11 +14,11 @@ ret float %1 ; FAULT-LABEL: fsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: fsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:s[0-7]]] ; CHECK-NEXT: fmul [[RB:s[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{s[0-7](, s[0-7])?}}, [[RB]] @@ -32,11 +32,11 @@ ret <2 x float> %1 ; FAULT-LABEL: f2sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f2sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2s(, v[0-7]\.2s)?}}, [[RB]] @@ -50,11 +50,11 @@ ret <4 x float> %1 ; FAULT-LABEL: f4sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f4sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -68,12 +68,12 @@ ret <8 x float> %1 ; FAULT-LABEL: f8sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f8sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -92,11 +92,11 @@ ret double %1 ; FAULT-LABEL: dsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: dsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:d[0-7]]] ; CHECK-NEXT: fmul [[RB:d[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{d[0-7](, d[0-7])?}}, [[RB]] @@ -111,11 +111,11 @@ ret <2 x double> %1 ; FAULT-LABEL: d2sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d2sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] @@ -130,12 +130,12 @@ ret <4 x double> %1 ; FAULT-LABEL: d4sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d4sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] @@ -158,11 +158,11 @@ ret float %2 ; FAULT-LABEL: frsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: frsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:s[0-7]]] ; CHECK-NEXT: fmul [[RB:s[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{s[0-7](, s[0-7])?}}, [[RB]] @@ -177,11 +177,11 @@ ret <2 x float> %2 ; FAULT-LABEL: f2rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f2rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2s(, v[0-7]\.2s)?}}, [[RB]] @@ -196,11 +196,11 @@ ret <4 x float> %2 ; FAULT-LABEL: f4rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f4rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; 
CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -215,12 +215,12 @@ ret <8 x float> %2 ; FAULT-LABEL: f8rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f8rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; CHECK: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -237,11 +237,11 @@ ret double %2 ; FAULT-LABEL: drsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: drsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:d[0-7]]] ; CHECK-NEXT: fmul [[RB:d[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{d[0-7](, d[0-7])?}}, [[RB]] @@ -257,11 +257,11 @@ ret <2 x double> %2 ; FAULT-LABEL: d2rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d2rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] @@ -277,12 +277,12 @@ ret <4 x double> %2 ; FAULT-LABEL: d4rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d4rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] Index: llvm/trunk/test/CodeGen/AArch64/tail-call.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/tail-call.ll +++ llvm/trunk/test/CodeGen/AArch64/tail-call.ll @@ -7,7 +7,7 @@ define fastcc void @caller_to0_from0() nounwind { ; CHECK-LABEL: caller_to0_from0: -; CHECK-NEXT: // BB +; CHECK-NEXT: // %bb. 
tail call fastcc void @callee_stack0() ret void Index: llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll +++ llvm/trunk/test/CodeGen/AMDGPU/branch-relaxation.ll @@ -24,7 +24,7 @@ ; GCN-NEXT: s_cbranch_scc1 [[BB3:BB[0-9]+_[0-9]+]] -; GCN-NEXT: ; BB#1: ; %bb2 +; GCN-NEXT: ; %bb.1: ; %bb2 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: v_nop_e64 ; GCN-NEXT: v_nop_e64 @@ -275,7 +275,7 @@ } ; GCN-LABEL: {{^}}uniform_unconditional_min_long_backward_branch: -; GCN-NEXT: ; BB#0: ; %entry +; GCN-NEXT: ; %bb.0: ; %entry ; GCN-NEXT: [[LOOP:BB[0-9]_[0-9]+]]: ; %loop ; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 @@ -311,7 +311,7 @@ ; branch from %bb0 to %bb2 ; GCN-LABEL: {{^}}expand_requires_expand: -; GCN-NEXT: ; BB#0: ; %bb0 +; GCN-NEXT: ; %bb.0: ; %bb0 ; GCN: s_load_dword ; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}} ; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]] @@ -398,7 +398,7 @@ ; GCN: s_cmp_lg_u32 ; GCN: s_cbranch_scc1 [[ENDIF]] -; GCN-NEXT: ; BB#2: ; %if_uniform +; GCN-NEXT: ; %bb.2: ; %if_uniform ; GCN: buffer_store_dword ; GCN-NEXT: [[ENDIF]]: ; %endif Index: llvm/trunk/test/CodeGen/AMDGPU/callee-frame-setup.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/callee-frame-setup.ll +++ llvm/trunk/test/CodeGen/AMDGPU/callee-frame-setup.ll @@ -2,7 +2,7 @@ ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX9 %s ; GCN-LABEL: {{^}}callee_no_stack: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN-NEXT: s_setpc_b64 define void @callee_no_stack() #0 { @@ -10,7 +10,7 @@ } ; GCN-LABEL: {{^}}callee_no_stack_no_fp_elim: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN-NEXT: s_setpc_b64 define void @callee_no_stack_no_fp_elim() #1 { @@ -20,7 +20,7 @@ ; Requires frame pointer for access to local regular object. 
; GCN-LABEL: {{^}}callee_with_stack: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN-NEXT: s_mov_b32 s5, s32 ; GCN-NEXT: v_mov_b32_e32 v0, 0{{$}} @@ -34,7 +34,7 @@ } ; GCN-LABEL: {{^}}callee_with_stack_and_call: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN: s_mov_b32 s5, s32 ; GCN: buffer_store_dword v32, off, s[0:3], s5 offset:8 Index: llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll +++ llvm/trunk/test/CodeGen/AMDGPU/cf-loop-on-constant.ll @@ -102,7 +102,7 @@ ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4 ; GCN: s_cbranch_vccnz [[LOOPBB]] -; GCN-NEXT: ; BB#2 +; GCN-NEXT: ; %bb.2 ; GCN-NEXT: s_endpgm define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n, i1 %cond) nounwind { entry: Index: llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll +++ llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll @@ -13,7 +13,7 @@ ; VGPR: workitem_private_segment_byte_size = 12{{$}} -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN: s_mov_b32 m0, -1 ; GCN: ds_read_b32 [[LOAD0:v[0-9]+]] @@ -91,7 +91,7 @@ ; GCN-LABEL: {{^}}divergent_loop: ; VGPR: workitem_private_segment_byte_size = 12{{$}} -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN: s_mov_b32 m0, -1 ; GCN: ds_read_b32 [[LOAD0:v[0-9]+]] @@ -167,7 +167,7 @@ } ; GCN-LABEL: {{^}}divergent_if_else_endif: -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN: s_mov_b32 m0, -1 ; GCN: ds_read_b32 [[LOAD0:v[0-9]+]] Index: llvm/trunk/test/CodeGen/AMDGPU/convergent-inlineasm.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/convergent-inlineasm.ll +++ llvm/trunk/test/CodeGen/AMDGPU/convergent-inlineasm.ll @@ -2,7 +2,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0 ; GCN-LABEL: {{^}}convergent_inlineasm: -; GCN: BB#0: +; GCN: %bb.0: ; GCN: v_cmp_ne_u32_e64 ; GCN: ; mask branch ; GCN: BB{{[0-9]+_[0-9]+}}: Index: llvm/trunk/test/CodeGen/AMDGPU/early-if-convert.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/early-if-convert.ll +++ llvm/trunk/test/CodeGen/AMDGPU/early-if-convert.ll @@ -382,7 +382,7 @@ } ; GCN-LABEL: {{^}}ifcvt_undef_scc: -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN-NEXT: s_load_dwordx2 ; GCN-NEXT: s_cselect_b32 s{{[0-9]+}}, 1, 0 define amdgpu_kernel void @ifcvt_undef_scc(i32 %cond, i32 addrspace(1)* %out) { Index: llvm/trunk/test/CodeGen/AMDGPU/else.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/else.ll +++ llvm/trunk/test/CodeGen/AMDGPU/else.ll @@ -25,7 +25,7 @@ } ; CHECK-LABEL: {{^}}else_execfix_leave_wqm: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: s_mov_b64 [[INIT_EXEC:s\[[0-9]+:[0-9]+\]]], exec ; CHECK: ; %Flow ; CHECK-NEXT: s_or_saveexec_b64 [[DST:s\[[0-9]+:[0-9]+\]]], Index: llvm/trunk/test/CodeGen/AMDGPU/fence-amdgiz.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/fence-amdgiz.ll +++ llvm/trunk/test/CodeGen/AMDGPU/fence-amdgiz.ll @@ -3,7 +3,7 @@ target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" ; CHECK-LABEL: atomic_fence -; CHECK: BB#0: +; CHECK: %bb.0: ; CHECK-NOT: 
ATOMIC_FENCE ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: buffer_wbinvl1_vol Index: llvm/trunk/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll +++ llvm/trunk/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll @@ -3,7 +3,7 @@ ; SILowerI1Copies was not handling IMPLICIT_DEF ; SI-LABEL: {{^}}br_implicit_def: -; SI: BB#0: +; SI: %bb.0: ; SI-NEXT: s_cbranch_scc1 define amdgpu_kernel void @br_implicit_def(i32 addrspace(1)* %out, i32 %arg) #0 { bb: Index: llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir +++ llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir @@ -26,7 +26,7 @@ ... --- # CHECK-LABEL: name: invert_br_undef_vcc -# CHECK: S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc +# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef %vcc name: invert_br_undef_vcc alignment: 0 @@ -58,7 +58,7 @@ %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %sgpr7 = S_MOV_B32 61440 %sgpr6 = S_MOV_B32 -1 - S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc bb.1.else: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -66,7 +66,7 @@ %vgpr0 = V_MOV_B32_e32 100, implicit %exec BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) %vgpr0 = V_MOV_B32_e32 1, implicit %exec - S_BRANCH %bb.3.done + S_BRANCH %bb.3 bb.2.if: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll @@ -127,7 +127,7 @@ } ;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4 ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 ;CHECK: s_waitcnt @@ -151,7 +151,7 @@ } ;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged_glc_slc: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4{{$}} ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:12 glc{{$}} ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 glc slc{{$}} @@ -176,7 +176,7 @@ } ;CHECK-LABEL: {{^}}buffer_load_x2_offen_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4 ;CHECK: s_waitcnt define amdgpu_ps void @buffer_load_x2_offen_merged(<4 x i32> inreg %rsrc, i32 %a) { @@ -194,7 +194,7 @@ } ;CHECK-LABEL: {{^}}buffer_load_x1_offset_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4 ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:28 ;CHECK: s_waitcnt @@ -212,7 +212,7 @@ } ;CHECK-LABEL: {{^}}buffer_load_x2_offset_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. 
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4 ;CHECK: s_waitcnt define amdgpu_ps void @buffer_load_x2_offset_merged(<4 x i32> inreg %rsrc) { Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1() #0 ; GCN-LABEL: {{^}}test_buffer_wbinvl1: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; SI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xc4,0xe1,0x00,0x00,0x00,0x00] ; VI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00] ; GCN-NEXT: s_endpgm Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll @@ -3,7 +3,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1.sc() #0 ; SI-LABEL: {{^}}test_buffer_wbinvl1_sc: -; SI-NEXT: ; BB#0: +; SI-NEXT: ; %bb.0: ; SI-NEXT: buffer_wbinvl1_sc ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00] ; SI-NEXT: s_endpgm define amdgpu_kernel void @test_buffer_wbinvl1_sc() #0 { Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1.vol() #0 ; GCN-LABEL: {{^}}test_buffer_wbinvl1_vol: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; CI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00] ; VI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00] ; GCN: s_endpgm Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll @@ -25,7 +25,7 @@ ; FIXME: Folds to 0 on gfx9 ; GCN-LABEL: {{^}}s_cvt_pkrtz_undef_undef: -; GCN-NEXT: ; BB#0 +; GCN-NEXT: ; %bb.0 ; SI-NEXT: s_endpgm ; VI-NEXT: s_endpgm ; GFX9: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll @@ -31,8 +31,8 @@ } ; SI-LABEL: {{^}}true: -; SI-NEXT: BB# -; SI-NEXT: BB# +; SI-NEXT: %bb. +; SI-NEXT: %bb. 
; SI-NEXT: s_endpgm define amdgpu_gs void @true() { call void @llvm.amdgcn.kill(i1 true) Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll @@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; GCN-LABEL: {{^}}test_s_dcache_inv: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; SI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0xc0,0xc7] ; VI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00] ; GCN-NEXT: s_endpgm @@ -15,7 +15,7 @@ } ; GCN-LABEL: {{^}}test_s_dcache_inv_insert_wait: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; GCN: s_dcache_inv ; GCN: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_inv_insert_wait() #0 { Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll @@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; GCN-LABEL: {{^}}test_s_dcache_inv_vol: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; CI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x40,0xc7] ; VI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00] ; GCN-NEXT: s_endpgm @@ -15,7 +15,7 @@ } ; GCN-LABEL: {{^}}test_s_dcache_inv_vol_insert_wait: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; GCN-NEXT: s_dcache_inv_vol ; GCN: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_inv_vol_insert_wait() #0 { Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; VI-LABEL: {{^}}test_s_dcache_wb: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: ; VI-NEXT: s_dcache_wb ; encoding: [0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00] ; VI-NEXT: s_endpgm define amdgpu_kernel void @test_s_dcache_wb() #0 { @@ -13,7 +13,7 @@ } ; VI-LABEL: {{^}}test_s_dcache_wb_insert_wait: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: ; VI-NEXT: s_dcache_wb ; VI: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_wb_insert_wait() #0 { Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; VI-LABEL: {{^}}test_s_dcache_wb_vol: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: ; VI-NEXT: s_dcache_wb_vol ; encoding: [0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00] ; VI-NEXT: s_endpgm define amdgpu_kernel void @test_s_dcache_wb_vol() #0 { @@ -13,7 +13,7 @@ } ; VI-LABEL: {{^}}test_s_dcache_wb_vol_insert_wait: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: ; VI-NEXT: s_dcache_wb_vol ; VI: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_wb_vol_insert_wait() #0 { Index: llvm/trunk/test/CodeGen/AMDGPU/loop_break.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/loop_break.ll +++ llvm/trunk/test/CodeGen/AMDGPU/loop_break.ll @@ -31,7 +31,7 @@ ; GCN: s_and_b64 vcc, 
exec, vcc ; GCN-NEXT: s_cbranch_vccnz [[FLOW:BB[0-9]+_[0-9]+]] -; GCN: ; BB#2: ; %bb4 +; GCN: ; %bb.2: ; %bb4 ; GCN: buffer_load_dword ; GCN: v_cmp_ge_i32_e32 vcc, ; GCN: s_or_b64 [[MASK]], vcc, [[INITMASK]] @@ -41,7 +41,7 @@ ; GCN: s_andn2_b64 exec, exec, [[MASK]] ; GCN-NEXT: s_cbranch_execnz [[LOOP_ENTRY]] -; GCN: ; BB#4: ; %bb9 +; GCN: ; %bb.4: ; %bb9 ; GCN-NEXT: s_endpgm define amdgpu_kernel void @break_loop(i32 %arg) #0 { bb: Index: llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll +++ llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll @@ -3,7 +3,7 @@ ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=GCN -check-prefix=GFX8 %s ; FUNC-LABEL: {{^}}system_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GFX6: s_waitcnt vmcnt(0){{$}} ; GFX6-NEXT: buffer_wbinvl1{{$}} @@ -17,7 +17,7 @@ } ; FUNC-LABEL: {{^}}system_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GCN: s_endpgm @@ -28,7 +28,7 @@ } ; FUNC-LABEL: {{^}}system_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -41,7 +41,7 @@ } ; FUNC-LABEL: {{^}}system_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -54,7 +54,7 @@ } ; FUNC-LABEL: {{^}}singlethread_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_acquire() { @@ -64,7 +64,7 @@ } ; FUNC-LABEL: {{^}}singlethread_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_release() { @@ -74,7 +74,7 @@ } ; FUNC-LABEL: {{^}}singlethread_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_acq_rel() { @@ -84,7 +84,7 @@ } ; FUNC-LABEL: {{^}}singlethread_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_seq_cst() { @@ -94,7 +94,7 @@ } ; FUNC-LABEL: {{^}}agent_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GFX6: s_waitcnt vmcnt(0){{$}} ; GFX6-NEXT: buffer_wbinvl1{{$}} @@ -108,7 +108,7 @@ } ; FUNC-LABEL: {{^}}agent_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GCN: s_endpgm @@ -119,7 +119,7 @@ } ; FUNC-LABEL: {{^}}agent_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -132,7 +132,7 @@ } ; FUNC-LABEL: {{^}}agent_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -145,7 +145,7 @@ } ; FUNC-LABEL: {{^}}workgroup_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @workgroup_acquire() { @@ -155,7 +155,7 @@ } ; FUNC-LABEL: {{^}}workgroup_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @workgroup_release() { @@ -165,7 +165,7 @@ } ; FUNC-LABEL: {{^}}workgroup_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @workgroup_acq_rel() { @@ -175,7 +175,7 @@ } ; FUNC-LABEL: {{^}}workgroup_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void 
@workgroup_seq_cst() { @@ -185,7 +185,7 @@ } ; FUNC-LABEL: {{^}}wavefront_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_acquire() { @@ -195,7 +195,7 @@ } ; FUNC-LABEL: {{^}}wavefront_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_release() { @@ -205,7 +205,7 @@ } ; FUNC-LABEL: {{^}}wavefront_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_acq_rel() { @@ -215,7 +215,7 @@ } ; FUNC-LABEL: {{^}}wavefront_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_seq_cst() { Index: llvm/trunk/test/CodeGen/AMDGPU/multilevel-break.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/multilevel-break.ll +++ llvm/trunk/test/CodeGen/AMDGPU/multilevel-break.ll @@ -34,7 +34,7 @@ ; GCN-NEXT: s_andn2_b64 exec, exec, [[OR_BREAK]] ; GCN-NEXT: s_cbranch_execnz [[INNER_LOOP]] -; GCN: ; BB#{{[0-9]+}}: ; %Flow1{{$}} +; GCN: ; %bb.{{[0-9]+}}: ; %Flow1{{$}} ; GCN-NEXT: ; in Loop: Header=[[OUTER_LOOP]] Depth=1 ; Ensure copy is eliminated Index: llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir +++ llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir @@ -184,8 +184,8 @@ %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -241,8 +241,8 @@ %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -298,8 +298,8 @@ %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -359,8 +359,8 @@ BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -384,7 +384,7 @@ # CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc # CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc # CHECK-NEXT: %exec = COPY %sgpr0_sgpr1 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_and_saveexec_xor_wrong_reg alignment: 0 exposesReturnsTwice: false @@ -420,8 +420,8 @@ %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = 
S_MOV_B64_term %sgpr0_sgpr1 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7 @@ -443,7 +443,7 @@ # CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc # CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc # CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_and_saveexec_xor_modify_copy_to_exec alignment: 0 @@ -479,8 +479,8 @@ %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -540,8 +540,8 @@ %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1, %sgpr2_sgpr3 @@ -565,7 +565,7 @@ # CHECK: %sgpr0_sgpr1 = COPY %exec # CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc # CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_unknown_saveexec alignment: 0 @@ -599,8 +599,8 @@ %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -656,8 +656,8 @@ %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -680,7 +680,7 @@ # CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}} # CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc # CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_andn2_saveexec_no_commute alignment: 0 exposesReturnsTwice: false @@ -713,8 +713,8 @@ %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 Index: llvm/trunk/test/CodeGen/AMDGPU/ret_jump.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/ret_jump.ll +++ llvm/trunk/test/CodeGen/AMDGPU/ret_jump.ll @@ -57,7 +57,7 @@ ; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable: ; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]] -; GCN: ; BB#{{[0-9]+}}: ; %else +; GCN: ; %bb.{{[0-9]+}}: ; %else ; GCN: s_and_saveexec_b64 
[[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc ; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9]+_[0-9]+]] Index: llvm/trunk/test/CodeGen/AMDGPU/sgpr-control-flow.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/sgpr-control-flow.ll +++ llvm/trunk/test/CodeGen/AMDGPU/sgpr-control-flow.ll @@ -37,7 +37,7 @@ ; SI: s_cmp_lg_u32 ; SI: s_cbranch_scc0 [[IF:BB[0-9]+_[0-9]+]] -; SI: ; BB#1: ; %else +; SI: ; %bb.1: ; %else ; SI: s_load_dword [[LOAD0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xe ; SI: s_load_dword [[LOAD1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xf ; SI-NOT: add Index: llvm/trunk/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll +++ llvm/trunk/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll @@ -55,7 +55,7 @@ ; GCN: s_cmp_lg_u32 ; GCN: s_cbranch_scc0 [[UNREACHABLE:BB[0-9]+_[0-9]+]] -; GCN-NEXT: BB#{{[0-9]+}}: ; %ret +; GCN-NEXT: %bb.{{[0-9]+}}: ; %ret ; GCN-NEXT: s_endpgm ; GCN: [[UNREACHABLE]]: Index: llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll +++ llvm/trunk/test/CodeGen/AMDGPU/skip-if-dead.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s ; CHECK-LABEL: {{^}}test_kill_depth_0_imm_pos: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 { call void @llvm.AMDGPU.kill(float 0.0) @@ -9,9 +9,9 @@ } ; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: s_mov_b64 exec, 0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 { call void @llvm.AMDGPU.kill(float -0.0) @@ -20,11 +20,11 @@ ; FIXME: Ideally only one would be emitted ; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg_x2: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: s_mov_b64 exec, 0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: s_mov_b64 exec, 0 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 { call void @llvm.AMDGPU.kill(float -0.0) @@ -33,9 +33,9 @@ } ; CHECK-LABEL: {{^}}test_kill_depth_var: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var(float %x) #0 { call void @llvm.AMDGPU.kill(float %x) @@ -44,11 +44,11 @@ ; FIXME: Ideally only one would be emitted ; CHECK-LABEL: {{^}}test_kill_depth_var_x2_same: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 { call void @llvm.AMDGPU.kill(float %x) @@ -57,11 +57,11 @@ } ; CHECK-LABEL: {{^}}test_kill_depth_var_x2: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v1 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 { 
call void @llvm.AMDGPU.kill(float %x) @@ -70,12 +70,12 @@ } ; CHECK-LABEL: {{^}}test_kill_depth_var_x2_instructions: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK: v_mov_b32_e64 v7, -1 ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 { call void @llvm.AMDGPU.kill(float %x) @@ -90,7 +90,7 @@ ; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0 ; CHECK: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK: v_mov_b32_e64 v7, -1 ; CHECK: v_nop_e64 ; CHECK: v_nop_e64 @@ -105,7 +105,7 @@ ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 ; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: exp null off, off, off, off done vm ; CHECK-NEXT: s_endpgm @@ -141,7 +141,7 @@ ; CHECK-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0 ; CHECK-NEXT: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#1: ; %bb +; CHECK-NEXT: ; %bb.1: ; %bb ; CHECK: v_mov_b32_e64 v7, -1 ; CHECK: v_nop_e64 ; CHECK: v_nop_e64 @@ -157,7 +157,7 @@ ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 ; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: exp null off, off, off, off done vm ; CHECK-NEXT: s_endpgm @@ -215,7 +215,7 @@ ; CHECK: v_nop_e64 ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 -; CHECK-NEXT: ; BB#3: +; CHECK-NEXT: ; %bb.3: ; CHECK: buffer_load_dword [[LOAD:v[0-9]+]] ; CHECK: v_cmp_eq_u32_e32 vcc, 0, [[LOAD]] ; CHECK-NEXT: s_and_b64 vcc, exec, vcc @@ -309,7 +309,7 @@ ; CHECK: [[SKIPKILL]]: ; CHECK: v_cmp_nge_f32_e32 vcc -; CHECK-NEXT: BB#3: ; %bb5 +; CHECK-NEXT: %bb.3: ; %bb5 ; CHECK-NEXT: .Lfunc_end{{[0-9]+}} define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 { bb: @@ -335,7 +335,7 @@ } ; CHECK-LABEL: {{^}}if_after_kill_block: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK: s_and_saveexec_b64 ; CHECK: s_xor_b64 ; CHECK-NEXT: mask branch [[BB4:BB[0-9]+_[0-9]+]] Index: llvm/trunk/test/CodeGen/AMDGPU/smrd.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/smrd.ll +++ llvm/trunk/test/CodeGen/AMDGPU/smrd.ll @@ -193,7 +193,7 @@ } ; GCN-LABEL: {{^}}smrd_vgpr_offset_imm: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. ; SICIVI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4095 ; @@ -207,7 +207,7 @@ } ; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. ; GCN-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0 ; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ; define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 { @@ -218,7 +218,7 @@ } ; GCN-LABEL: {{^}}smrd_imm_merged: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. ; SICI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1 ; SICI-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x7 ; VI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x4 @@ -243,7 +243,7 @@ } ; GCN-LABEL: {{^}}smrd_vgpr_merged: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. 
; SICIVI-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4 ; SICIVI-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 Index: llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll +++ llvm/trunk/test/CodeGen/AMDGPU/uniform-cfg.ll @@ -401,7 +401,7 @@ ; GCN: s_cmp_lt_i32 [[COND]], 1 ; GCN: s_cbranch_scc1 BB[[FNNUM:[0-9]+]]_3 -; GCN: BB#1: +; GCN: %bb.1: ; GCN-NOT: cmp ; GCN: buffer_load_dword ; GCN: buffer_store_dword Index: llvm/trunk/test/CodeGen/AMDGPU/valu-i1.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/valu-i1.ll +++ llvm/trunk/test/CodeGen/AMDGPU/valu-i1.ll @@ -192,7 +192,7 @@ ; Load loop limit from buffer ; Branch to exit if uniformly not taken -; SI: ; BB#0: +; SI: ; %bb.0: ; SI: buffer_load_dword [[VBOUND:v[0-9]+]] ; SI: v_cmp_lt_i32_e32 vcc ; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc Index: llvm/trunk/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir +++ llvm/trunk/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir @@ -48,7 +48,7 @@ # CHECK-LABEL: name: vccz_corrupt_workaround # CHECK: %vcc = V_CMP_EQ_F32 # CHECK-NEXT: %vcc = S_MOV_B64 %vcc -# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit killed %vcc +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed %vcc name: vccz_corrupt_workaround alignment: 0 @@ -82,7 +82,7 @@ %sgpr7 = S_MOV_B32 61440 %sgpr6 = S_MOV_B32 -1 %vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, implicit %exec - S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc + S_CBRANCH_VCCZ %bb.1, implicit killed %vcc bb.2.if: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -90,7 +90,7 @@ %vgpr0 = V_MOV_B32_e32 9, implicit %exec BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) %vgpr0 = V_MOV_B32_e32 0, implicit %exec - S_BRANCH %bb.3.done + S_BRANCH %bb.3 bb.1.else: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -111,7 +111,7 @@ --- # CHECK-LABEL: name: vccz_corrupt_undef_vcc # CHECK: S_WAITCNT -# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit undef %vcc +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef %vcc name: vccz_corrupt_undef_vcc alignment: 0 @@ -143,7 +143,7 @@ %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %sgpr7 = S_MOV_B32 61440 %sgpr6 = S_MOV_B32 -1 - S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc + S_CBRANCH_VCCZ %bb.1, implicit undef %vcc bb.2.if: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -151,7 +151,7 @@ %vgpr0 = V_MOV_B32_e32 9, implicit %exec BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) %vgpr0 = V_MOV_B32_e32 0, implicit %exec - S_BRANCH %bb.3.done + S_BRANCH %bb.3 bb.1.else: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 Index: llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll +++ llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll @@ -32,13 +32,13 @@ ret i32 %2 } -; CHECK-DIV-DAG: BB#0 -; 
CHECK-DIV-DAG: Successors according to CFG: BB#1({{.*}}) BB#2 -; CHECK-DIV-DAG: BB#1 -; CHECK-DIV-DAG: Successors according to CFG: BB#3 -; CHECK-DIV-DAG: BB#2 -; CHECK-DIV-DAG: Successors according to CFG: BB#3 -; CHECK-DIV-DAG: BB#3 +; CHECK-DIV-DAG: %bb.0 +; CHECK-DIV-DAG: Successors according to CFG: %bb.1({{.*}}) %bb.2 +; CHECK-DIV-DAG: %bb.1 +; CHECK-DIV-DAG: Successors according to CFG: %bb.3 +; CHECK-DIV-DAG: %bb.2 +; CHECK-DIV-DAG: Successors according to CFG: %bb.3 +; CHECK-DIV-DAG: %bb.3 ; RUN: llc -mtriple thumbv7--windows-itanium -print-machineinstrs=expand-isel-pseudos -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck %s -check-prefix CHECK-MOD @@ -66,13 +66,13 @@ ret i32 %retval.0 } -; CHECK-MOD-DAG: BB#0 -; CHECK-MOD-DAG: Successors according to CFG: BB#2({{.*}}) BB#1 -; CHECK-MOD-DAG: BB#1 -; CHECK-MOD-DAG: Successors according to CFG: BB#3 -; CHECK-MOD-DAG: BB#3 -; CHECK-MOD-DAG: Successors according to CFG: BB#2 -; CHECK-MOD-DAG: BB#2 +; CHECK-MOD-DAG: %bb.0 +; CHECK-MOD-DAG: Successors according to CFG: %bb.2({{.*}}) %bb.1 +; CHECK-MOD-DAG: %bb.1 +; CHECK-MOD-DAG: Successors according to CFG: %bb.3 +; CHECK-MOD-DAG: %bb.3 +; CHECK-MOD-DAG: Successors according to CFG: %bb.2 +; CHECK-MOD-DAG: %bb.2 ; RUN: llc -mtriple thumbv7--windows-itanium -print-machineinstrs=expand-isel-pseudos -verify-machineinstrs -filetype asm -o /dev/null %s 2>&1 | FileCheck %s -check-prefix CHECK-CFG ; RUN: llc -mtriple thumbv7--windows-itanium -verify-machineinstrs -filetype asm -o - %s | FileCheck %s -check-prefix CHECK-CFG-ASM @@ -111,23 +111,23 @@ attributes #0 = { optsize } -; CHECK-CFG-DAG: BB#0 -; CHECK-CFG-DAG: t2Bcc -; CHECK-CFG-DAG: t2B +; CHECK-CFG-DAG: %bb.0 +; CHECK-CFG-DAG: t2Bcc %bb.2 +; CHECK-CFG-DAG: t2B %bb.1 -; CHECK-CFG-DAG: BB#1 -; CHECK-CFG-DAG: t2B +; CHECK-CFG-DAG: %bb.1 +; CHECK-CFG-DAG: t2B %bb.3 -; CHECK-CFG-DAG: BB#2 +; CHECK-CFG-DAG: %bb.2 ; CHECK-CFG-DAG: tCMPi8 %{{[0-9]}}, 0 -; CHECK-CFG-DAG: t2Bcc +; CHECK-CFG-DAG: t2Bcc %bb.5 -; CHECK-CFG-DAG: BB#4 +; CHECK-CFG-DAG: %bb.4 -; CHECK-CFG-DAG: BB#3 +; CHECK-CFG-DAG: %bb.3 ; CHECK-CFG-DAG: tBX_RET -; CHECK-CFG-DAG: BB#5 +; CHECK-CFG-DAG: %bb.5 ; CHECK-CFG-DAG: t__brkdiv0 ; CHECK-CFG-ASM-LABEL: h: Index: llvm/trunk/test/CodeGen/ARM/and-load-combine.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/and-load-combine.ll +++ llvm/trunk/test/CodeGen/ARM/and-load-combine.ll @@ -6,7 +6,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a, ; ARM-LABEL: cmp_xor8_short_short: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldrh r1, [r1] ; ARM-NEXT: eor r1, r1, r0 @@ -16,7 +16,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor8_short_short: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldrh r1, [r1] ; ARMEB-NEXT: eor r1, r1, r0 @@ -26,7 +26,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor8_short_short: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldrh r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -34,13 +34,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB0_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB0_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor8_short_short: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldrh r1, [r1] ; 
THUMB2-NEXT: eors r0, r1 @@ -61,7 +61,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a, ; ARM-LABEL: cmp_xor8_short_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: eor r1, r1, r0 @@ -71,7 +71,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor8_short_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: eor r1, r1, r0 @@ -81,7 +81,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor8_short_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -89,13 +89,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB1_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB1_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor8_short_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: eors r0, r1 @@ -117,7 +117,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a, ; ARM-LABEL: cmp_xor8_int_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: eor r1, r1, r0 @@ -127,7 +127,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor8_int_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: eor r1, r1, r0 @@ -137,7 +137,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor8_int_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -145,13 +145,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB2_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB2_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor8_int_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: eors r0, r1 @@ -172,7 +172,7 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a, ; ARM-LABEL: cmp_xor16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: movw r2, #65535 ; ARM-NEXT: ldr r1, [r1] @@ -183,7 +183,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: movw r2, #65535 ; ARMEB-NEXT: ldr r1, [r1] @@ -194,7 +194,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -202,13 +202,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #16 ; THUMB1-NEXT: beq .LBB3_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB3_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: eors r0, r1 @@ -229,7 +229,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a, ; ARM-LABEL: cmp_or8_short_short: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; 
ARM-NEXT: ldrh r1, [r1] ; ARM-NEXT: orr r1, r1, r0 @@ -239,7 +239,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or8_short_short: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldrh r1, [r1] ; ARMEB-NEXT: orr r1, r1, r0 @@ -249,7 +249,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or8_short_short: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldrh r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -257,13 +257,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB4_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB4_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or8_short_short: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldrh r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -284,7 +284,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a, ; ARM-LABEL: cmp_or8_short_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: orr r1, r1, r0 @@ -294,7 +294,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or8_short_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: orr r1, r1, r0 @@ -304,7 +304,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or8_short_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -312,13 +312,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB5_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB5_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or8_short_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -340,7 +340,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a, ; ARM-LABEL: cmp_or8_int_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: orr r1, r1, r0 @@ -350,7 +350,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or8_int_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: orr r1, r1, r0 @@ -360,7 +360,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or8_int_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -368,13 +368,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB6_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB6_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or8_int_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -395,7 +395,7 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a, ; ARM-LABEL: cmp_or16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: movw r2, #65535 ; ARM-NEXT: ldr r1, [r1] @@ -406,7 +406,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: 
movw r2, #65535 ; ARMEB-NEXT: ldr r1, [r1] @@ -417,7 +417,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -425,13 +425,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #16 ; THUMB1-NEXT: beq .LBB7_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB7_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -452,7 +452,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a, ; ARM-LABEL: cmp_and8_short_short: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r1, [r1] ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: and r1, r0, r1 @@ -462,7 +462,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and8_short_short: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r1, [r1] ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: and r1, r0, r1 @@ -472,7 +472,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and8_short_short: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r1, [r1] ; THUMB1-NEXT: ldrh r2, [r0] ; THUMB1-NEXT: ands r2, r1 @@ -480,13 +480,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB8_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB8_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and8_short_short: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r1, [r1] ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ands r0, r1 @@ -507,7 +507,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a, ; ARM-LABEL: cmp_and8_short_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: and r1, r1, r0 @@ -517,7 +517,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and8_short_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: and r1, r1, r0 @@ -527,7 +527,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and8_short_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: ands r2, r0 @@ -535,13 +535,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB9_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB9_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and8_short_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ands r0, r1 @@ -563,7 +563,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a, ; ARM-LABEL: cmp_and8_int_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: and r1, r0, r1 @@ -573,7 +573,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and8_int_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: and r1, r0, r1 @@ -583,7 +583,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and8_int_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; 
THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r2, [r0] ; THUMB1-NEXT: ands r2, r1 @@ -591,13 +591,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB10_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB10_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and8_int_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ands r0, r1 @@ -618,7 +618,7 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a, ; ARM-LABEL: cmp_and16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: movw r2, #65535 ; ARM-NEXT: ldr r0, [r0] @@ -629,7 +629,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: movw r2, #65535 ; ARMEB-NEXT: ldr r0, [r0] @@ -640,7 +640,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r2, [r0] ; THUMB1-NEXT: ands r2, r1 @@ -648,13 +648,13 @@ ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #16 ; THUMB1-NEXT: beq .LBB11_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB11_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ands r0, r1 @@ -675,7 +675,7 @@ define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) { ; ARM-LABEL: add_and16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: add r1, r1, r2 ; ARM-NEXT: orr r0, r0, r1 @@ -683,7 +683,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: add_and16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: add r1, r1, r2 ; ARMEB-NEXT: orr r0, r0, r1 @@ -691,7 +691,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: add_and16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: adds r1, r1, r2 ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: orrs r0, r1 @@ -699,7 +699,7 @@ ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: add_and16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: add r1, r2 ; THUMB2-NEXT: orrs r0, r1 @@ -715,7 +715,7 @@ define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-LABEL: test1: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: mul r2, r2, r3 ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] @@ -725,7 +725,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test1: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: mul r2, r2, r3 ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] @@ -735,7 +735,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test1: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: muls r2, r3, r2 ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r0, [r0] @@ -745,7 +745,7 @@ ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test1: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: muls r2, r3, r2 ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] @@ -765,7 +765,7 @@ define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-LABEL: test2: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: mul 
r1, r2, r1 @@ -775,7 +775,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test2: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: mul r1, r2, r1 @@ -785,7 +785,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test2: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: muls r1, r2, r1 ; THUMB1-NEXT: ldr r0, [r0] @@ -795,7 +795,7 @@ ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test2: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: muls r1, r2, r1 @@ -815,7 +815,7 @@ define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) { ; ARM-LABEL: test3: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: mul r1, r2, r0 ; ARM-NEXT: ldrh r2, [r3] @@ -825,7 +825,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test3: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: mul r1, r2, r0 ; ARMEB-NEXT: ldrh r2, [r3] @@ -835,7 +835,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test3: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: muls r2, r0, r2 ; THUMB1-NEXT: ldrh r1, [r3] @@ -845,7 +845,7 @@ ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test3: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: mul r1, r2, r0 ; THUMB2-NEXT: ldrh r2, [r3] @@ -866,7 +866,7 @@ define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-LABEL: test4: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: mul r2, r2, r3 ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] @@ -876,7 +876,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test4: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: mul r2, r2, r3 ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] @@ -886,7 +886,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test4: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: muls r2, r3, r2 ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r0, [r0] @@ -896,7 +896,7 @@ ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test4: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: muls r2, r3, r2 ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] @@ -916,7 +916,7 @@ define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) { ; ARM-LABEL: test5: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: mul r1, r2, r1 @@ -926,7 +926,7 @@ ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test5: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: mul r1, r2, r1 @@ -936,7 +936,7 @@ ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test5: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: muls r1, r2, r1 ; THUMB1-NEXT: ldr r0, [r0] @@ -946,7 +946,7 @@ ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test5: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: muls r1, r2, r1 Index: llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll +++ llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll @@ -142,27 +142,27 @@ define i32 @test_tst_assessment(i32 %a, i32 %b) { ; ARM-LABEL: test_tst_assessment: -; ARM: 
@ BB#0: +; ARM: @ %bb.0: ; ARM-NEXT: and r0, r0, #1 ; ARM-NEXT: tst r1, #1 ; ARM-NEXT: subne r0, r0, #1 ; ARM-NEXT: mov pc, lr ; ; THUMB-LABEL: test_tst_assessment: -; THUMB: @ BB#0: +; THUMB: @ %bb.0: ; THUMB-NEXT: movs r2, r0 ; THUMB-NEXT: movs r0, #1 ; THUMB-NEXT: ands r0, r2 ; THUMB-NEXT: subs r2, r0, #1 ; THUMB-NEXT: lsls r1, r1, #31 ; THUMB-NEXT: beq .LBB2_2 -; THUMB-NEXT: @ BB#1: +; THUMB-NEXT: @ %bb.1: ; THUMB-NEXT: movs r0, r2 ; THUMB-NEXT: .LBB2_2: ; THUMB-NEXT: bx lr ; ; T2-LABEL: test_tst_assessment: -; T2: @ BB#0: +; T2: @ %bb.0: ; T2-NEXT: lsls r1, r1, #31 ; T2-NEXT: and r0, r0, #1 ; T2-NEXT: it ne @@ -170,7 +170,7 @@ ; T2-NEXT: bx lr ; ; V8-LABEL: test_tst_assessment: -; V8: @ BB#0: +; V8: @ %bb.0: ; V8-NEXT: and r0, r0, #1 ; V8-NEXT: lsls r1, r1, #31 ; V8-NEXT: it ne Index: llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll +++ llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll @@ -1046,7 +1046,7 @@ ; CHECK-ARM-NEXT: cmp r[[OLD]], r0 ; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]] ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r1 is a reasonable guess. ; CHECK: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 @@ -1080,7 +1080,7 @@ ; CHECK-ARM-NEXT: cmp r[[OLD]], r0 ; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]] ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r1 is a reasonable guess. ; CHECK: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 @@ -1113,7 +1113,7 @@ ; function there. ; CHECK-NEXT: cmp r[[OLD]], r0 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r1 is a reasonable guess. ; CHECK: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 @@ -1152,7 +1152,7 @@ ; CHECK-ARM-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]] ; CHECK-THUMB-BE: orrs{{(\.w)?}} {{(r[0-9]+, )?}}[[MISMATCH_LO]], [[MISMATCH_HI]] ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r2, r3 is a reasonable guess. 
; CHECK: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 Index: llvm/trunk/test/CodeGen/ARM/bool-ext-inc.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/bool-ext-inc.ll +++ llvm/trunk/test/CodeGen/ARM/bool-ext-inc.ll @@ -3,7 +3,7 @@ define i32 @sext_inc(i1 zeroext %x) { ; CHECK-LABEL: sext_inc: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r0, r0, #1 ; CHECK-NEXT: mov pc, lr %ext = sext i1 %x to i32 @@ -13,7 +13,7 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) { ; CHECK-LABEL: sext_inc_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov.i16 d16, #0x1 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: veor d16, d17, d16 @@ -30,7 +30,7 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmpgt_sext_inc_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: mov r0, sp @@ -49,7 +49,7 @@ define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmpne_sext_inc_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d18, d19}, [r12] Index: llvm/trunk/test/CodeGen/ARM/cmpxchg-weak.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/cmpxchg-weak.ll +++ llvm/trunk/test/CodeGen/ARM/cmpxchg-weak.ll @@ -5,16 +5,16 @@ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic %oldval = extractvalue { i32, i1 } %pair, 0 -; CHECK-NEXT: BB#0: +; CHECK-NEXT: %bb.0: ; CHECK-NEXT: ldrex [[LOADED:r[0-9]+]], [r0] ; CHECK-NEXT: cmp [[LOADED]], r1 ; CHECK-NEXT: bne [[LDFAILBB:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB#1: +; CHECK-NEXT: %bb.1: ; CHECK-NEXT: dmb ish ; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r2, [r0] ; CHECK-NEXT: cmp [[SUCCESS]], #0 ; CHECK-NEXT: beq [[SUCCESSBB:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; CHECK-NEXT: str r3, [r0] ; CHECK-NEXT: bx lr ; CHECK-NEXT: [[LDFAILBB]]: @@ -37,11 +37,11 @@ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic %success = extractvalue { i32, i1 } %pair, 1 -; CHECK-NEXT: BB#0: +; CHECK-NEXT: %bb.0: ; CHECK-NEXT: ldrex [[LOADED:r[0-9]+]], [r1] ; CHECK-NEXT: cmp [[LOADED]], r2 ; CHECK-NEXT: bne [[LDFAILBB:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB#1: +; CHECK-NEXT: %bb.1: ; CHECK-NEXT: dmb ish ; CHECK-NEXT: mov r0, #0 ; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r3, [r1] Index: llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-alu.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-alu.ll +++ llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-alu.ll @@ -5,7 +5,7 @@ ; Check the latency for ALU shifted operand variants. ; ; CHECK: ********** MI Scheduling ********** -; CHECK: foo:BB#0 entry +; CHECK: foo:%bb.0 entry ; ALU, basic - 1 cyc I0/I1 ; CHECK: EORrr Index: llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-basic.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-basic.ll +++ llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-basic.ll @@ -6,7 +6,7 @@ ; SDIV should be scheduled at the block's begin (20 cyc of independent M unit). 
; ; CHECK: ********** MI Scheduling ********** -; CHECK: foo:BB#0 entry +; CHECK: foo:%bb.0 entry ; GENERIC: LDRi12 ; GENERIC: Latency : 1 @@ -30,7 +30,7 @@ ; A57_SCHED: SUBrr ; A57_SCHED: Latency : 1 -; CHECK: ** Final schedule for BB#0 *** +; CHECK: ** Final schedule for %bb.0 *** ; GENERIC: LDRi12 ; GENERIC: SDIV ; A57_SCHED: SDIV Index: llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vadd.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vadd.ll +++ llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vadd.ll @@ -1,7 +1,7 @@ ; REQUIRES: asserts ; RUN: llc < %s -mtriple=armv8r-eabi -mcpu=cortex-a57 -misched-postra -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s -; CHECK-LABEL: addv_i32:BB#0 +; CHECK-LABEL: addv_i32:%bb.0 ; CHECK: SU(8): {{.*}} VADDv4i32 ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left @@ -13,7 +13,7 @@ ret <4 x i32> %3 } -; CHECK-LABEL: addv_f32:BB#0 +; CHECK-LABEL: addv_f32:%bb.0 ; CHECK: SU(8): {{.*}} VADDfq ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left Index: llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vfma.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vfma.ll +++ llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vfma.ll @@ -5,7 +5,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test1:BB#0 +; CHECK: Test1:%bb.0 ; CHECK: VMULS ; > VMULS common latency = 5 @@ -44,7 +44,7 @@ ; ASIMD form define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test2:BB#0 +; CHECK: Test2:%bb.0 ; CHECK: VMULfd ; > VMULfd common latency = 5 @@ -82,7 +82,7 @@ define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test3:BB#0 +; CHECK: Test3:%bb.0 ; CHECK: VMULS ; > VMULS common latency = 5 @@ -121,7 +121,7 @@ ; ASIMD form define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test4:BB#0 +; CHECK: Test4:%bb.0 ; CHECK: VMULfd ; > VMULfd common latency = 5 @@ -159,7 +159,7 @@ define float @Test5(float %f1, float %f2, float %f3) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test5:BB#0 +; CHECK: Test5:%bb.0 ; CHECK-DEFAULT: VNMLS ; CHECK-FAST: VFNMS @@ -178,7 +178,7 @@ define float @Test6(float %f1, float %f2, float %f3) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test6:BB#0 +; CHECK: Test6:%bb.0 ; CHECK-DEFAULT: VNMLA ; CHECK-FAST: VFNMA Index: llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vsub.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vsub.ll +++ llvm/trunk/test/CodeGen/ARM/cortex-a57-misched-vsub.ll @@ -1,7 +1,7 @@ ; REQUIRES: asserts ; RUN: llc < %s -mtriple=armv8r-eabi -mcpu=cortex-a57 -misched-postra -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s -; CHECK-LABEL: subv_i32:BB#0 +; CHECK-LABEL: subv_i32:%bb.0 ; CHECK: SU(8): {{.*}} VSUBv4i32 ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left @@ -13,7 +13,7 @@ ret <4 x i32> %3 } -; CHECK-LABEL: subv_f32:BB#0 +; CHECK-LABEL: subv_f32:%bb.0 ; CHECK: 
SU(8): {{.*}} VSUBfq ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left Index: llvm/trunk/test/CodeGen/ARM/cortexr52-misched-basic.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/cortexr52-misched-basic.ll +++ llvm/trunk/test/CodeGen/ARM/cortexr52-misched-basic.ll @@ -7,7 +7,7 @@ ; as div takes more cycles to compute than eor. ; ; CHECK: ********** MI Scheduling ********** -; CHECK: foo:BB#0 entry +; CHECK: foo:%bb.0 entry ; CHECK: EORrr ; GENERIC: Latency : 1 ; R52_SCHED: Latency : 3 @@ -17,7 +17,7 @@ ; CHECK: SDIV ; GENERIC: Latency : 0 ; R52_SCHED: Latency : 8 -; CHECK: ** Final schedule for BB#0 *** +; CHECK: ** Final schedule for %bb.0 *** ; GENERIC: EORrr ; GENERIC: SDIV ; R52_SCHED: SDIV Index: llvm/trunk/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll +++ llvm/trunk/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll @@ -6,7 +6,7 @@ define i32 @foo(%struct.desc* %descs, i32 %num, i32 %cw) local_unnamed_addr #0 { ; CHECK-LABEL: foo: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: mov r1, #32 ; CHECK-NEXT: vld1.32 {d16, d17}, [r0], r1 ; CHECK-NEXT: vld1.32 {d18, d19}, [r0] Index: llvm/trunk/test/CodeGen/ARM/deprecated-asm.s =================================================================== --- llvm/trunk/test/CodeGen/ARM/deprecated-asm.s +++ llvm/trunk/test/CodeGen/ARM/deprecated-asm.s @@ -25,7 +25,7 @@ .type foo,%function foo: @ @foo .fnstart -@ BB#0: @ %entry +@ %bb.0: @ %entry mov r0, #0 bx lr stmia r4!, {r12-r14} Index: llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll +++ llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll @@ -21,8 +21,8 @@ ; Afer if conversion, we have ; for.body -> for.cond.backedge (100%) ; -> cond.false.i (0%) -; CHECK: BB#1: derived from LLVM BB %for.body -; CHECK: Successors according to CFG: BB#2(0x80000000 / 0x80000000 = 100.00%) BB#4(0x00000001 / 0x80000000 = 0.00%) +; CHECK: %bb.1: derived from LLVM BB %for.body +; CHECK: Successors according to CFG: %bb.2(0x80000000 / 0x80000000 = 100.00%) %bb.4(0x00000001 / 0x80000000 = 0.00%) for.body: br i1 undef, label %for.cond.backedge, label %lor.lhs.false.i, !prof !1 Index: llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll +++ llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll @@ -18,8 +18,8 @@ %9 = icmp eq i32 %8, 0 br i1 %9, label %return, label %bb2 -; CHECK: BB#2: derived from LLVM BB %bb2 -; CHECK: Successors according to CFG: BB#4({{[0-9a-fx/= ]+}}50.00%) BB#3({{[0-9a-fx/= ]+}}50.00%) +; CHECK: %bb.2: derived from LLVM BB %bb2 +; CHECK: Successors according to CFG: %bb.4({{[0-9a-fx/= ]+}}50.00%) %bb.3({{[0-9a-fx/= ]+}}50.00%) bb2: %v10 = icmp eq i32 %3, 16 Index: llvm/trunk/test/CodeGen/ARM/ifcvt-iter-indbr.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/ifcvt-iter-indbr.ll +++ llvm/trunk/test/CodeGen/ARM/ifcvt-iter-indbr.ll @@ -30,10 +30,10 @@ ; CHECK-NEXT: [[FOOCALL]]: ; CHECK-NEXT: bl _foo ; -; CHECK-PROB: BB#0: -; CHECK-PROB: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}50.00%) BB#3({{[0-9a-fx/= ]+}}25.00%) BB#5({{[0-9a-fx/= ]+}}25.00%) -; CHECK-PROB: BB#2: -; CHECK-PROB: Successors 
according to CFG: BB#3({{[0-9a-fx/= ]+}}50.00%) BB#5({{[0-9a-fx/= ]+}}50.00%) +; CHECK-PROB: %bb.0: +; CHECK-PROB: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}50.00%) %bb.3({{[0-9a-fx/= ]+}}25.00%) %bb.5({{[0-9a-fx/= ]+}}25.00%) +; CHECK-PROB: %bb.2: +; CHECK-PROB: Successors according to CFG: %bb.3({{[0-9a-fx/= ]+}}50.00%) %bb.5({{[0-9a-fx/= ]+}}50.00%) define i32 @test(i32 %a, i32 %a2, i32* %p, i32* %p2) "no-frame-pointer-elim"="true" { entry: Index: llvm/trunk/test/CodeGen/ARM/illegal-bitfield-loadstore.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/illegal-bitfield-loadstore.ll +++ llvm/trunk/test/CodeGen/ARM/illegal-bitfield-loadstore.ll @@ -4,14 +4,14 @@ define void @i24_or(i24* %a) { ; LE-LABEL: i24_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldrh r1, [r0] ; LE-NEXT: orr r1, r1, #384 ; LE-NEXT: strh r1, [r0] ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i24_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: ldrh r1, [r0] ; BE-NEXT: ldrb r2, [r0, #2] ; BE-NEXT: orr r1, r2, r1, lsl #8 @@ -28,7 +28,7 @@ define void @i24_and_or(i24* %a) { ; LE-LABEL: i24_and_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldrh r1, [r0] ; LE-NEXT: mov r2, #16256 ; LE-NEXT: orr r2, r2, #49152 @@ -38,7 +38,7 @@ ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i24_and_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: mov r1, #128 ; BE-NEXT: strb r1, [r0, #2] ; BE-NEXT: ldrh r1, [r0] @@ -54,7 +54,7 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) { ; LE-LABEL: i24_insert_bit: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: mov r3, #255 ; LE-NEXT: ldrh r2, [r0] ; LE-NEXT: orr r3, r3, #57088 @@ -64,7 +64,7 @@ ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i24_insert_bit: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: ldrh r2, [r0] ; BE-NEXT: mov r3, #57088 ; BE-NEXT: orr r3, r3, #16711680 @@ -84,14 +84,14 @@ define void @i56_or(i56* %a) { ; LE-LABEL: i56_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldr r1, [r0] ; LE-NEXT: orr r1, r1, #384 ; LE-NEXT: str r1, [r0] ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i56_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: mov r1, r0 ; BE-NEXT: ldr r12, [r0] ; BE-NEXT: ldrh r2, [r1, #4]! @@ -114,7 +114,7 @@ define void @i56_and_or(i56* %a) { ; LE-LABEL: i56_and_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldr r1, [r0] ; LE-NEXT: orr r1, r1, #384 ; LE-NEXT: bic r1, r1, #127 @@ -122,7 +122,7 @@ ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i56_and_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: mov r1, r0 ; BE-NEXT: ldr r12, [r0] ; BE-NEXT: ldrh r2, [r1, #4]! 
@@ -147,7 +147,7 @@ define void @i56_insert_bit(i56* %a, i1 zeroext %bit) { ; LE-LABEL: i56_insert_bit: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldr r2, [r0] ; LE-NEXT: bic r2, r2, #8192 ; LE-NEXT: orr r1, r2, r1, lsl #13 @@ -155,7 +155,7 @@ ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i56_insert_bit: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: .save {r11, lr} ; BE-NEXT: push {r11, lr} ; BE-NEXT: mov r2, r0 Index: llvm/trunk/test/CodeGen/ARM/jump-table-tbh.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/jump-table-tbh.ll +++ llvm/trunk/test/CodeGen/ARM/jump-table-tbh.ll @@ -10,7 +10,7 @@ ; T2-LABEL: test_tbh: ; T2: [[ANCHOR:.LCPI[0-9_]+]]: ; T2: tbh [pc, r{{[0-9]+}}, lsl #1] -; T2-NEXT: @ BB#{{[0-9]+}} +; T2-NEXT: @ %bb.{{[0-9]+}} ; T2-NEXT: LJTI ; T2-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2 ; T2-NEXT: .short (.LBB0_{{[0-9]+}}-([[ANCHOR]]+4))/2 @@ -24,7 +24,7 @@ ; T1: lsls [[x]], [[x]], #1 ; T1: [[ANCHOR:.LCPI[0-9_]+]]: ; T1: add pc, [[x]] -; T1-NEXT: @ BB#2 +; T1-NEXT: @ %bb.2 ; T1-NEXT: .p2align 2 ; T1-NEXT: LJTI ; T1-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2 Index: llvm/trunk/test/CodeGen/ARM/machine-licm.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/machine-licm.ll +++ llvm/trunk/test/CodeGen/ARM/machine-licm.ll @@ -31,7 +31,7 @@ ; ARM-NOT: LCPI0_1: ; ARM: .section -; THUMB: BB#1 +; THUMB: %bb.1 ; THUMB: ldr r2, LCPI0_0 ; THUMB: add r2, pc ; THUMB: ldr r{{[0-9]+}}, [r2] Index: llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll +++ llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll @@ -4,7 +4,7 @@ ; Loop counter copies should be eliminated. ; There is also a MUL here, but we don't care where it is scheduled. ; CHECK: postinc -; CHECK: *** Final schedule for BB#2 *** +; CHECK: *** Final schedule for %bb.2 *** ; CHECK: t2LDRs ; CHECK: t2ADDrr ; CHECK: t2CMPrr @@ -32,7 +32,7 @@ ; This case was a crasher in constrainLocalCopy. ; The problem was the t2LDR_PRE defining both the global and local lrg. 
-; CHECK-LABEL: *** Final schedule for BB#5 *** +; CHECK-LABEL: *** Final schedule for %bb.5 *** ; CHECK: %[[R4:[0-9]+]], %[[R1:[0-9]+]] = t2LDR_PRE %[[R1]] ; CHECK: %{{[0-9]+}} = COPY %[[R1]] ; CHECK: %{{[0-9]+}} = COPY %[[R4]] Index: llvm/trunk/test/CodeGen/ARM/negate-i1.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/negate-i1.ll +++ llvm/trunk/test/CodeGen/ARM/negate-i1.ll @@ -4,7 +4,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) { ; CHECK-LABEL: select_i32_neg1_or_0: -; CHECK-NEXT: @ BB#0: +; CHECK-NEXT: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr @@ -15,7 +15,7 @@ define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) { ; CHECK-LABEL: select_i32_neg1_or_0_zeroext: -; CHECK-NEXT: @ BB#0: +; CHECK-NEXT: @ %bb.0: ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr ; Index: llvm/trunk/test/CodeGen/ARM/neon_vabs.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/neon_vabs.ll +++ llvm/trunk/test/CodeGen/ARM/neon_vabs.ll @@ -3,7 +3,7 @@ define <4 x i32> @test1(<4 x i32> %a) nounwind { ; CHECK-LABEL: test1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 q8, q8 @@ -18,7 +18,7 @@ define <4 x i32> @test2(<4 x i32> %a) nounwind { ; CHECK-LABEL: test2: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 q8, q8 @@ -33,7 +33,7 @@ define <8 x i16> @test3(<8 x i16> %a) nounwind { ; CHECK-LABEL: test3: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s16 q8, q8 @@ -48,7 +48,7 @@ define <16 x i8> @test4(<16 x i8> %a) nounwind { ; CHECK-LABEL: test4: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s8 q8, q8 @@ -63,7 +63,7 @@ define <4 x i32> @test5(<4 x i32> %a) nounwind { ; CHECK-LABEL: test5: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 q8, q8 @@ -78,7 +78,7 @@ define <2 x i32> @test6(<2 x i32> %a) nounwind { ; CHECK-LABEL: test6: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -91,7 +91,7 @@ define <2 x i32> @test7(<2 x i32> %a) nounwind { ; CHECK-LABEL: test7: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -104,7 +104,7 @@ define <4 x i16> @test8(<4 x i16> %a) nounwind { ; CHECK-LABEL: test8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -117,7 +117,7 @@ define <8 x i8> @test9(<8 x i8> %a) nounwind { ; CHECK-LABEL: test9: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -130,7 +130,7 @@ define <2 x i32> @test10(<2 x i32> %a) nounwind { ; CHECK-LABEL: test10: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -146,7 +146,7 @@ define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind { ; CHECK-LABEL: test11: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r2, r3 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: vabdl.u16 q8, d17, d16 @@ -163,7 +163,7 @@ } define 
<8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind { ; CHECK-LABEL: test12: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r2, r3 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: vabdl.u8 q8, d17, d16 @@ -181,7 +181,7 @@ define <2 x i64> @test13(<2 x i32> %a, <2 x i32> %b) nounwind { ; CHECK-LABEL: test13: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r2, r3 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: vabdl.u32 q8, d17, d16 Index: llvm/trunk/test/CodeGen/ARM/nest-register.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/nest-register.ll +++ llvm/trunk/test/CodeGen/ARM/nest-register.ll @@ -5,7 +5,7 @@ define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r0, r12 ; CHECK-NEXT: mov pc, lr ret i8* %arg Index: llvm/trunk/test/CodeGen/ARM/noopt-dmb-v7.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/noopt-dmb-v7.ll +++ llvm/trunk/test/CodeGen/ARM/noopt-dmb-v7.ll @@ -9,7 +9,7 @@ ret i32 0 } -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: dmb ish ; CHECK-NEXT: dmb ish ; CHECK-NEXT: dmb ish Index: llvm/trunk/test/CodeGen/ARM/select_const.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/select_const.ll +++ llvm/trunk/test/CodeGen/ARM/select_const.ll @@ -8,7 +8,7 @@ define i32 @select_0_or_1(i1 %cond) { ; CHECK-LABEL: select_0_or_1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: bic r0, r1, r0 ; CHECK-NEXT: mov pc, lr @@ -18,7 +18,7 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_0_or_1_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r0, r0, #1 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 0, i32 1 @@ -27,7 +27,7 @@ define i32 @select_0_or_1_signext(i1 signext %cond) { ; CHECK-LABEL: select_0_or_1_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: bic r0, r1, r0 ; CHECK-NEXT: mov pc, lr @@ -39,7 +39,7 @@ define i32 @select_1_or_0(i1 %cond) { ; CHECK-LABEL: select_1_or_0: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 1, i32 0 @@ -48,7 +48,7 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_1_or_0_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 1, i32 0 ret i32 %sel @@ -56,7 +56,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) { ; CHECK-LABEL: select_1_or_0_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 1, i32 0 @@ -67,7 +67,7 @@ define i32 @select_0_or_neg1(i1 %cond) { ; CHECK-LABEL: select_0_or_neg1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: bic r0, r1, r0 ; CHECK-NEXT: rsb r0, r0, #0 @@ -78,7 +78,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_0_or_neg1_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r0, r0, #1 ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr @@ -88,7 +88,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) { ; CHECK-LABEL: select_0_or_neg1_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mvn r0, r0 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 0, i32 -1 @@ -97,7 +97,7 @@ define i32 @select_0_or_neg1_alt(i1 %cond) { ; 
CHECK-LABEL: select_0_or_neg1_alt: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: sub r0, r0, #1 ; CHECK-NEXT: mov pc, lr @@ -108,7 +108,7 @@ define i32 @select_0_or_neg1_alt_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_0_or_neg1_alt_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: sub r0, r0, #1 ; CHECK-NEXT: mov pc, lr %z = zext i1 %cond to i32 @@ -118,7 +118,7 @@ define i32 @select_0_or_neg1_alt_signext(i1 signext %cond) { ; CHECK-LABEL: select_0_or_neg1_alt_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mvn r0, r0 ; CHECK-NEXT: mov pc, lr %z = zext i1 %cond to i32 @@ -130,7 +130,7 @@ define i32 @select_neg1_or_0(i1 %cond) { ; CHECK-LABEL: select_neg1_or_0: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr @@ -140,7 +140,7 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_neg1_or_0_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 -1, i32 0 @@ -149,7 +149,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) { ; CHECK-LABEL: select_neg1_or_0_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 -1, i32 0 ret i32 %sel @@ -159,7 +159,7 @@ define i32 @select_Cplus1_C(i1 %cond) { ; CHECK-LABEL: select_Cplus1_C: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #41 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #42 @@ -171,7 +171,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_Cplus1_C_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #41 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: movne r1, #42 @@ -183,7 +183,7 @@ define i32 @select_Cplus1_C_signext(i1 signext %cond) { ; CHECK-LABEL: select_Cplus1_C_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #41 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #42 @@ -197,7 +197,7 @@ define i32 @select_C_Cplus1(i1 %cond) { ; CHECK-LABEL: select_C_Cplus1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #42 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #41 @@ -209,7 +209,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_C_Cplus1_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #42 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: movne r1, #41 @@ -221,7 +221,7 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) { ; CHECK-LABEL: select_C_Cplus1_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #42 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #41 @@ -236,7 +236,7 @@ define i32 @select_C1_C2(i1 %cond) { ; CHECK-LABEL: select_C1_C2: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #165 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: orr r1, r1, #256 @@ -249,7 +249,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_C1_C2_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #165 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: orr r1, r1, #256 @@ -262,7 +262,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) { ; CHECK-LABEL: select_C1_C2_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #165 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: orr r1, r1, #256 @@ -278,7 +278,7 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) { ; CHECK-LABEL: opaque_constant1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: mov 
lr, #1 @@ -310,7 +310,7 @@ define i64 @opaque_constant2(i1 %cond, i64 %x) { ; CHECK-LABEL: opaque_constant2: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: orr r1, r1, #65536 Index: llvm/trunk/test/CodeGen/ARM/setcc-logic.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/setcc-logic.ll +++ llvm/trunk/test/CodeGen/ARM/setcc-logic.ll @@ -3,7 +3,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i32 %x) nounwind { ; CHECK-LABEL: ne_neg1_and_ne_zero: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: add r1, r0, #1 ; CHECK-NEXT: mov r0, #0 ; CHECK-NEXT: cmp r1, #1 @@ -19,7 +19,7 @@ define zeroext i1 @and_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; CHECK-LABEL: and_eq: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r2, r2, r3 ; CHECK-NEXT: eor r0, r0, r1 ; CHECK-NEXT: orrs r0, r0, r2 @@ -34,7 +34,7 @@ define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; CHECK-LABEL: or_ne: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r2, r2, r3 ; CHECK-NEXT: eor r0, r0, r1 ; CHECK-NEXT: orrs r0, r0, r2 @@ -48,7 +48,7 @@ define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind { ; CHECK-LABEL: and_eq_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r11, lr} ; CHECK-NEXT: push {r11, lr} ; CHECK-NEXT: vmov d19, r2, r3 Index: llvm/trunk/test/CodeGen/ARM/tail-merge-branch-weight.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/tail-merge-branch-weight.ll +++ llvm/trunk/test/CodeGen/ARM/tail-merge-branch-weight.ll @@ -9,9 +9,9 @@ ; = 0.2 * 0.4 + 0.8 * 0.7 = 0.64 ; CHECK: # Machine code for function test0: -; CHECK: Successors according to CFG: BB#{{[0-9]+}}({{[0-9a-fx/= ]+}}20.00%) BB#{{[0-9]+}}({{[0-9a-fx/= ]+}}80.00%) -; CHECK: BB#{{[0-9]+}}: -; CHECK: BB#{{[0-9]+}}: +; CHECK: Successors according to CFG: %bb.{{[0-9]+}}({{[0-9a-fx/= ]+}}20.00%) %bb.{{[0-9]+}}({{[0-9a-fx/= ]+}}80.00%) +; CHECK: %bb.{{[0-9]+}}: +; CHECK: %bb.{{[0-9]+}}: ; CHECK: # End machine code for function test0. 
define i32 @test0(i32 %n, i32 %m, i32* nocapture %a, i32* nocapture %b) { Index: llvm/trunk/test/CodeGen/ARM/taildup-branch-weight.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/taildup-branch-weight.ll +++ llvm/trunk/test/CodeGen/ARM/taildup-branch-weight.ll @@ -3,7 +3,7 @@ ; RUN: | FileCheck %s ; CHECK: Machine code for function test0: -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}3.12%) BB#2({{[0-9a-fx/= ]+}}96.88%) +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}3.12%) %bb.2({{[0-9a-fx/= ]+}}96.88%) define void @test0(i32 %a, i32 %b, i32* %c, i32* %d) { entry: @@ -30,7 +30,7 @@ !0 = !{!"branch_weights", i32 4, i32 124} ; CHECK: Machine code for function test1: -; CHECK: Successors according to CFG: BB#2(0x7c000000 / 0x80000000 = 96.88%) BB#1(0x04000000 / 0x80000000 = 3.12%) +; CHECK: Successors according to CFG: %bb.2(0x7c000000 / 0x80000000 = 96.88%) %bb.1(0x04000000 / 0x80000000 = 3.12%) @g0 = common global i32 0, align 4 Index: llvm/trunk/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll +++ llvm/trunk/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll @@ -30,7 +30,7 @@ unreachable for.cond14.preheader.us.i.i.i: ; preds = %for.inc459.us.i.i.i, %for.cond7.preheader.i.i.preheader.i -; CHECK: @ BB#4 +; CHECK: @ %bb.4 ; CHECK-NEXT: .p2align 2 switch i4 undef, label %func_1.exit.loopexit [ i4 0, label %for.inc459.us.i.i.i Index: llvm/trunk/test/CodeGen/ARM/vbits.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/vbits.ll +++ llvm/trunk/test/CodeGen/ARM/vbits.ll @@ -3,7 +3,7 @@ define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_andi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -17,7 +17,7 @@ define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_andi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -31,7 +31,7 @@ define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_andi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -45,7 +45,7 @@ define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_andi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -59,7 +59,7 @@ define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_andQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -74,7 +74,7 @@ define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_andQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -89,7 +89,7 @@ define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_andQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -104,7 +104,7 @@ define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) 
nounwind { ; CHECK-LABEL: v_andQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -119,7 +119,7 @@ define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_bici8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -134,7 +134,7 @@ define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_bici16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -149,7 +149,7 @@ define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_bici32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -164,7 +164,7 @@ define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_bici64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -179,7 +179,7 @@ define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_bicQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -195,7 +195,7 @@ define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_bicQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -211,7 +211,7 @@ define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_bicQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -227,7 +227,7 @@ define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_bicQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -243,7 +243,7 @@ define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_eori8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -257,7 +257,7 @@ define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_eori16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -271,7 +271,7 @@ define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_eori32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -285,7 +285,7 @@ define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_eori64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -299,7 +299,7 @@ define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_eorQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -314,7 +314,7 @@ define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_eorQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; 
CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -329,7 +329,7 @@ define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_eorQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -344,7 +344,7 @@ define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_eorQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -359,7 +359,7 @@ define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_mvni8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -371,7 +371,7 @@ define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind { ; CHECK-LABEL: v_mvni16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -383,7 +383,7 @@ define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: v_mvni32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -395,7 +395,7 @@ define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind { ; CHECK-LABEL: v_mvni64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -407,7 +407,7 @@ define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind { ; CHECK-LABEL: v_mvnQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -420,7 +420,7 @@ define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind { ; CHECK-LABEL: v_mvnQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -433,7 +433,7 @@ define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: v_mvnQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -446,7 +446,7 @@ define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind { ; CHECK-LABEL: v_mvnQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -459,7 +459,7 @@ define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_orri8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -473,7 +473,7 @@ define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_orri16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -487,7 +487,7 @@ define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_orri32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -501,7 +501,7 @@ define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_orri64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -515,7 +515,7 @@ define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_orrQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; 
CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -530,7 +530,7 @@ define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_orrQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -545,7 +545,7 @@ define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_orrQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -560,7 +560,7 @@ define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_orrQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -575,7 +575,7 @@ define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_orni8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -590,7 +590,7 @@ define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_orni16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -605,7 +605,7 @@ define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_orni32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -620,7 +620,7 @@ define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_orni64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -635,7 +635,7 @@ define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_ornQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -651,7 +651,7 @@ define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_ornQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -667,7 +667,7 @@ define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_ornQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -683,7 +683,7 @@ define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_ornQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -699,7 +699,7 @@ define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtsti8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtst.8 d16, d17, d16 @@ -715,7 +715,7 @@ define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vtsti16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtst.16 d16, d17, d16 @@ -731,7 +731,7 @@ define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vtsti32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: 
vldr d17, [r0] ; CHECK-NEXT: vtst.32 d16, d17, d16 @@ -747,7 +747,7 @@ define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vtstQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtst.8 q8, q9, q8 @@ -764,7 +764,7 @@ define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtstQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtst.16 q8, q9, q8 @@ -781,7 +781,7 @@ define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vtstQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtst.32 q8, q9, q8 @@ -798,7 +798,7 @@ define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_orrimm: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr.i32 d16, #0x1000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -810,7 +810,7 @@ define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind { ; CHECK-LABEL: v_orrimmQ: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vorr.i32 q8, #0x1000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -823,7 +823,7 @@ define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_bicimm: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vbic.i32 d16, #0xff000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -835,7 +835,7 @@ define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind { ; CHECK-LABEL: v_bicimmQ: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vbic.i32 q8, #0xff000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -848,7 +848,7 @@ define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) nounwind { ; CHECK-LABEL: hidden_not_v4i32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d19, r2, r3 ; CHECK-NEXT: vmov.i32 q8, #0x6 ; CHECK-NEXT: vmov d18, r0, r1 Index: llvm/trunk/test/CodeGen/ARM/vcvt.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/vcvt.ll +++ llvm/trunk/test/CodeGen/ARM/vcvt.ll @@ -3,7 +3,7 @@ define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.s32.f32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -15,7 +15,7 @@ define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_f32tou32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.u32.f32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -27,7 +27,7 @@ define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -39,7 +39,7 @@ define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.u32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -51,7 +51,7 @@ define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.s32.f32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -64,7 +64,7 @@ define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_f32tou32: -; CHECK: 
@ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.u32.f32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -77,7 +77,7 @@ define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.s32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -90,7 +90,7 @@ define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.u32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -103,7 +103,7 @@ define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_n_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.s32.f32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -115,7 +115,7 @@ define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_n_f32tou32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.u32.f32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -127,7 +127,7 @@ define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_n_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.s32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -139,7 +139,7 @@ define <2 x float> @vcvt_n_u32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_n_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.u32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -156,7 +156,7 @@ define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.s32.f32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -169,7 +169,7 @@ define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_f32tou32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.u32.f32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -182,7 +182,7 @@ define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.s32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -195,7 +195,7 @@ define <4 x float> @vcvtQ_n_u32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.u32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -213,7 +213,7 @@ define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind { ; CHECK-LABEL: vcvt_f16tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.f16 q8, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -226,7 +226,7 @@ define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_f32tof16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f16.f32 d16, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -242,7 +242,7 @@ define <4 x i16> @fix_float_to_i16(<4 x float> %in) { ; CHECK-LABEL: fix_float_to_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.u32.f32 q8, q8, #1 @@ -257,7 +257,7 @@ define <2 x i64> @fix_float_to_i64(<2 x float> %in) { ; CHECK-LABEL: fix_float_to_i64: -; CHECK: @ BB#0: +; CHECK: 
@ %bb.0: ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: .vsave {d8, d9} @@ -287,7 +287,7 @@ define <4 x i16> @fix_double_to_i16(<4 x double> %in) { ; CHECK-LABEL: fix_double_to_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d18, r0, r1 ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] @@ -319,7 +319,7 @@ define <2 x i64> @fix_double_to_i64(<2 x double> %in) { ; CHECK-LABEL: fix_double_to_i64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: .vsave {d8, d9} @@ -352,7 +352,7 @@ define i32 @multi_sint(double %c, i32* nocapture %p, i32* nocapture %q) { ; CHECK-LABEL: multi_sint: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.s32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -369,7 +369,7 @@ define i32 @multi_uint(double %c, i32* nocapture %p, i32* nocapture %q) { ; CHECK-LABEL: multi_uint: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.u32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -386,7 +386,7 @@ define void @double_to_sint_store(double %c, i32* nocapture %p) { ; CHECK-LABEL: double_to_sint_store: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.s32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -398,7 +398,7 @@ define void @double_to_uint_store(double %c, i32* nocapture %p) { ; CHECK-LABEL: double_to_uint_store: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.u32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -410,7 +410,7 @@ define void @float_to_sint_store(float %c, i32* nocapture %p) { ; CHECK-LABEL: float_to_sint_store: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov s0, r0 ; CHECK-NEXT: vcvt.s32.f32 s0, s0 ; CHECK-NEXT: vstr s0, [r1] @@ -422,7 +422,7 @@ define void @float_to_uint_store(float %c, i32* nocapture %p) { ; CHECK-LABEL: float_to_uint_store: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov s0, r0 ; CHECK-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-NEXT: vstr s0, [r1] Index: llvm/trunk/test/CodeGen/ARM/vext.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/vext.ll +++ llvm/trunk/test/CodeGen/ARM/vext.ll @@ -3,7 +3,7 @@ define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextd: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vext.8 d16, d17, d16, #3 @@ -17,7 +17,7 @@ define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextRd: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vext.8 d16, d17, d16, #5 @@ -31,7 +31,7 @@ define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextq: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vext.8 q8, q9, q8, #3 @@ -46,7 +46,7 @@ define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextRq: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vext.8 q8, q9, q8, #7 @@ -61,7 +61,7 @@ define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: test_vextd16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vext.16 d16, d17, d16, #3 @@ 
-75,7 +75,7 @@ define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: test_vextq32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vext.32 q8, q9, q8, #3 @@ -92,7 +92,7 @@ define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextd_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vext.8 d16, d17, d16, #3 @@ -106,7 +106,7 @@ define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextRq_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vext.8 q8, q9, q8, #7 @@ -121,7 +121,7 @@ define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind { ; CHECK-LABEL: test_vextq_undef_op2: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 q8, q8, q8, #2 @@ -135,7 +135,7 @@ define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind { ; CHECK-LABEL: test_vextd_undef_op2: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 d16, d16, d16, #2 ; CHECK-NEXT: vmov r0, r1, d16 @@ -148,7 +148,7 @@ define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind { ; CHECK-LABEL: test_vextq_undef_op2_undef: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 q8, q8, q8, #2 @@ -162,7 +162,7 @@ define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind { ; CHECK-LABEL: test_vextd_undef_op2_undef: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 d16, d16, d16, #2 ; CHECK-NEXT: vmov r0, r1, d16 @@ -180,7 +180,7 @@ ; Essence: a vext is used on %A and something saner than stack load/store for final result. define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: test_interleaved: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vext.16 d16, d16, d17, #3 ; CHECK-NEXT: vorr d17, d16, d16 @@ -198,7 +198,7 @@ ; An undef in the shuffle list should still be optimizable define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: test_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0, #8] ; CHECK-NEXT: vzip.16 d17, d16 @@ -215,7 +215,7 @@ ; Try to look for fallback to by-element inserts. define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind { ; CHECK-LABEL: test_multisource: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, r0 ; CHECK-NEXT: add r2, r0, #48 ; CHECK-NEXT: add r0, r0, #32 @@ -240,7 +240,7 @@ ; Again, test for fallback to by-element inserts. define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind { ; CHECK-LABEL: test_largespan: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vorr d18, d16, d16 ; CHECK-NEXT: vuzp.16 d18, d17 @@ -258,7 +258,7 @@ ; really important.) 
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: test_illegal: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vorr d22, d16, d16 ; CHECK-NEXT: vmov.u16 r0, d16[0] @@ -287,7 +287,7 @@ ; Make sure this doesn't crash define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind { ; CHECK-LABEL: test_elem_mismatch: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0:128] ; CHECK-NEXT: vmov.32 r0, d16[0] ; CHECK-NEXT: vmov.32 r2, d17[0] @@ -309,7 +309,7 @@ define <4 x i32> @test_reverse_and_extract(<2 x i32>* %A) { ; CHECK-LABEL: test_reverse_and_extract: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vrev64.32 q9, q8 ; CHECK-NEXT: vext.32 q8, q8, q9, #2 @@ -324,7 +324,7 @@ define <4 x i32> @test_dup_and_extract(<2 x i32>* %A) { ; CHECK-LABEL: test_dup_and_extract: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] ; CHECK-NEXT: vext.32 q8, q9, q8, #2 @@ -339,7 +339,7 @@ define <4 x i32> @test_zip_and_extract(<2 x i32>* %A) { ; CHECK-LABEL: test_zip_and_extract: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr q9, q8, q8 ; CHECK-NEXT: vorr q10, q8, q8 Index: llvm/trunk/test/CodeGen/ARM/vpadd.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/vpadd.ll +++ llvm/trunk/test/CodeGen/ARM/vpadd.ll @@ -3,7 +3,7 @@ define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vpaddi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.i8 d16, d17, d16 @@ -17,7 +17,7 @@ define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vpaddi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.i16 d16, d17, d16 @@ -31,7 +31,7 @@ define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vpaddi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.i32 d16, d17, d16 @@ -45,7 +45,7 @@ define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind { ; CHECK-LABEL: vpaddf32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.f32 d16, d17, d16 @@ -65,7 +65,7 @@ define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddls8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.s8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -77,7 +77,7 @@ define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind { ; CHECK-LABEL: vpaddls16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.s16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -89,7 +89,7 @@ define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddls32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -101,7 +101,7 @@ define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddlu8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.u8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -113,7 +113,7 @@ define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind { ; CHECK-LABEL: 
vpaddlu16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.u16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -125,7 +125,7 @@ define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddlu32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.u32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -137,7 +137,7 @@ define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddlQs8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s8 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -150,7 +150,7 @@ define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind { ; CHECK-LABEL: vpaddlQs16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s16 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -163,7 +163,7 @@ define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddlQs32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -176,7 +176,7 @@ define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddlQu8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u8 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -189,7 +189,7 @@ define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind { ; CHECK-LABEL: vpaddlQu16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u16 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -202,7 +202,7 @@ define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddlQu32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -216,7 +216,7 @@ ; Combine vuzp+vadd->vpadd. define void @addCombineToVPADD_i8(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADD_i8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpadd.i8 d16, d16, d17 ; CHECK-NEXT: vstr d16, [r1] @@ -233,7 +233,7 @@ ; Combine vuzp+vadd->vpadd. define void @addCombineToVPADD_i16(<8 x i16> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADD_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpadd.i16 d16, d16, d17 ; CHECK-NEXT: vstr d16, [r1] @@ -249,7 +249,7 @@ ; Combine vtrn+vadd->vpadd. define void @addCombineToVPADD_i32(<4 x i32> *%cbcr, <2 x i32> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADD_i32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpadd.i32 d16, d16, d17 ; CHECK-NEXT: vstr d16, [r1] @@ -265,7 +265,7 @@ ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_s8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s8 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -284,7 +284,7 @@ ; FIXME: Legalization butchers the shuffles. 
define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDL_s8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov.i16 d16, #0x8 ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vext.8 d17, d18, d16, #1 @@ -309,7 +309,7 @@ ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_u8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u8 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -328,7 +328,7 @@ ; shuffle is awkward, so this doesn't match at the moment. define void @addCombineToVPADDLq_u8_early_zext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u8_early_zext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmovl.u8 q9, d17 ; CHECK-NEXT: vmovl.u8 q8, d16 @@ -349,7 +349,7 @@ ; FIXME: Legalization butchers the shuffle. define void @addCombineToVPADDL_u8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDL_u8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vext.8 d18, d16, d16, #1 ; CHECK-NEXT: vbic.i16 d16, #0xff00 @@ -370,7 +370,7 @@ ; Matching to vpaddl.8 requires matching shuffle(zext()). define void @addCombineToVPADDL_u8_early_zext(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDL_u8_early_zext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmovl.u8 q8, d16 ; CHECK-NEXT: vpadd.i16 d16, d16, d17 @@ -388,7 +388,7 @@ ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_s16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_s16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s16 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -406,7 +406,7 @@ ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_u16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u16 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -424,7 +424,7 @@ ; Combine vtrn+vaddl->vpaddl define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_s32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s32 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -442,7 +442,7 @@ ; Combine vtrn+vaddl->vpaddl define void @addCombineToVPADDLq_u32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u32 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -460,7 +460,7 @@ ; Legalization promotes the <4 x i8> to <4 x i16>. define <4 x i8> @fromExtendingExtractVectorElt_i8(<8 x i8> %in) { ; CHECK-LABEL: fromExtendingExtractVectorElt_i8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vpaddl.s8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -474,7 +474,7 @@ ; Legalization promotes the <2 x i16> to <2 x i32>. 
define <2 x i16> @fromExtendingExtractVectorElt_i16(<4 x i16> %in) { ; CHECK-LABEL: fromExtendingExtractVectorElt_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vpaddl.s16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 Index: llvm/trunk/test/CodeGen/ARM/vtrn.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/vtrn.ll +++ llvm/trunk/test/CodeGen/ARM/vtrn.ll @@ -2,7 +2,7 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.8 d17, d16 @@ -19,7 +19,7 @@ define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]] @@ -34,7 +34,7 @@ define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vtrni16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.16 d17, d16 @@ -51,7 +51,7 @@ define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vtrni16_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.16 [[LDR0]], [[LDR1]] @@ -66,7 +66,7 @@ define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vtrni32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.32 d17, d16 @@ -83,7 +83,7 @@ define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vtrni32_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]] @@ -98,7 +98,7 @@ define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind { ; CHECK-LABEL: vtrnf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.32 d17, d16 @@ -115,7 +115,7 @@ define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind { ; CHECK-LABEL: vtrnf_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]] @@ -130,7 +130,7 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vtrnQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.8 q9, q8 @@ -148,7 +148,7 @@ define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vtrnQi8_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.8 q9, q8 @@ -163,7 +163,7 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.16 q9, q8 @@ -181,7 +181,7 @@ define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.16 q9, q8 @@ 
-196,7 +196,7 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vtrnQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -214,7 +214,7 @@ define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vtrnQi32_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -229,7 +229,7 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vtrnQf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -247,7 +247,7 @@ define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vtrnQf_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -263,7 +263,7 @@ define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.8 d17, d16 @@ -280,7 +280,7 @@ define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8_undef_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]] @@ -295,7 +295,7 @@ define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.16 q9, q8 @@ -313,7 +313,7 @@ define <16 x i16> @vtrnQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16_undef_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.16 q9, q8 @@ -375,7 +375,7 @@ define void @lower_twice_no_vtrn(<4 x i16>* %A, <4 x i16>* %B, <8 x i16>* %C) { entry: ; CHECK-LABEL: lower_twice_no_vtrn: - ; CHECK: @ BB#0: + ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d18, [r0] ; CHECK-NEXT: vtrn.16 d18, d16 @@ -394,7 +394,7 @@ define void @upper_twice_no_vtrn(<4 x i16>* %A, <4 x i16>* %B, <8 x i16>* %C) { entry: ; CHECK-LABEL: upper_twice_no_vtrn: - ; CHECK: @ BB#0: + ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d18, [r0] ; CHECK-NEXT: vtrn.16 d18, d16 Index: llvm/trunk/test/CodeGen/ARM/vuzp.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/vuzp.ll +++ llvm/trunk/test/CodeGen/ARM/vuzp.ll @@ -3,7 +3,7 @@ define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vuzp.8 d17, d16 @@ -20,7 +20,7 @@ define <16 x i8> @vuzpi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vuzp.8 d16, d17 @@ -35,7 +35,7 @@ define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: 
vuzp.16 d17, d16 @@ -52,7 +52,7 @@ define <8 x i16> @vuzpi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpi16_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vuzp.16 d16, d17 @@ -69,7 +69,7 @@ define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.8 q9, q8 @@ -87,7 +87,7 @@ define <32 x i8> @vuzpQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpQi8_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.8 q9, q8 @@ -102,7 +102,7 @@ define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -120,7 +120,7 @@ define <16 x i16> @vuzpQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -135,7 +135,7 @@ define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vuzpQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -153,7 +153,7 @@ define <8 x i32> @vuzpQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vuzpQi32_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -168,7 +168,7 @@ define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vuzpQf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -186,7 +186,7 @@ define <8 x float> @vuzpQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vuzpQf_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -203,7 +203,7 @@ define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vuzp.8 d17, d16 @@ -220,7 +220,7 @@ define <16 x i8> @vuzpi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8_undef_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vuzp.8 d16, d17 @@ -235,7 +235,7 @@ define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -253,7 +253,7 @@ define <16 x i16> @vuzpQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16_undef_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -268,7 +268,7 @@ define <8 x i16> @vuzp_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vuzp_lower_shufflemask_undef: 
-; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr q9, q8, q8 @@ -285,7 +285,7 @@ define <4 x i32> @vuzp_lower_shufflemask_zeroed(<2 x i32>* %A, <2 x i32>* %B) { ; CHECK-LABEL: vuzp_lower_shufflemask_zeroed: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] @@ -303,7 +303,7 @@ define void @vuzp_rev_shufflemask_vtrn(<2 x i32>* %A, <2 x i32>* %B, <4 x i32>* %C) { ; CHECK-LABEL: vuzp_rev_shufflemask_vtrn: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vrev64.32 q9, q8 @@ -323,7 +323,7 @@ ; This results in a build_vector with mismatched types. We will generate two vmovn.i32 instructions to ; truncate from i32 to i16 and one vmovn.i16 to perform the final truncation for i8. ; CHECK-LABEL: cmpsel_trunc: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: add r12, sp, #16 ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] ; CHECK-NEXT: mov r12, sp @@ -352,7 +352,7 @@ ; to perform the vuzp and get the vbsl mask. define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1, ; CHECK-LABEL: vuzp_trunc_and_shuffle: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r11, lr} ; CHECK-NEXT: push {r11, lr} ; CHECK-NEXT: add r12, sp, #8 @@ -388,7 +388,7 @@ ; This produces a build_vector with some of the operands undefs. define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1, ; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] ; CHECK-NEXT: add r12, sp, #16 @@ -416,7 +416,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1, ; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] ; CHECK-NEXT: add r12, sp, #16 @@ -435,7 +435,7 @@ ; CHECK-NEXT: vmov r0, r1, d16 ; CHECK-NEXT: mov pc, lr ; CHECK-NEXT: .p2align 3 -; CHECK-NEXT: @ BB#1: +; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI22_0: ; CHECK-NEXT: .byte 255 @ 0xff ; CHECK-NEXT: .byte 255 @ 0xff @@ -458,7 +458,7 @@ ; get some vector size that we can represent. 
define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1, ; CHECK-LABEL: vuzp_wide_type: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r10, r11, lr} ; CHECK-NEXT: push {r4, r10, r11, lr} ; CHECK-NEXT: .setfp r11, sp, #8 @@ -517,7 +517,7 @@ ; CHECK-NEXT: pop {r4, r10, r11, lr} ; CHECK-NEXT: mov pc, lr ; CHECK-NEXT: .p2align 3 -; CHECK-NEXT: @ BB#1: +; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI23_0: ; CHECK-NEXT: .byte 0 @ 0x0 ; CHECK-NEXT: .byte 1 @ 0x1 @@ -539,7 +539,7 @@ %struct.uint8x8x2_t = type { [2 x <8 x i8>] } define %struct.uint8x8x2_t @vuzp_extract_subvector(<16 x i8> %t) #0 { ; CHECK-LABEL: vuzp_extract_subvector: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vorr d18, d17, d17 Index: llvm/trunk/test/CodeGen/ARM/vzip.ll =================================================================== --- llvm/trunk/test/CodeGen/ARM/vzip.ll +++ llvm/trunk/test/CodeGen/ARM/vzip.ll @@ -3,7 +3,7 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vzip.8 d17, d16 @@ -20,7 +20,7 @@ define <16 x i8> @vzipi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.8 d16, d17 @@ -35,7 +35,7 @@ define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vzipi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vzip.16 d17, d16 @@ -52,7 +52,7 @@ define <8 x i16> @vzipi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vzipi16_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.16 d16, d17 @@ -69,7 +69,7 @@ define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.8 q9, q8 @@ -87,7 +87,7 @@ define <32 x i8> @vzipQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.8 q9, q8 @@ -102,7 +102,7 @@ define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vzipQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.16 q9, q8 @@ -120,7 +120,7 @@ define <16 x i16> @vzipQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vzipQi16_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.16 q9, q8 @@ -135,7 +135,7 @@ define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vzipQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.32 q9, q8 @@ -153,7 +153,7 @@ define <8 x i32> @vzipQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vzipQi32_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.32 q9, q8 @@ -168,7 +168,7 @@ define <4 x float> @vzipQf(<4 x float>* 
%A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vzipQf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.32 q9, q8 @@ -186,7 +186,7 @@ define <8 x float> @vzipQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vzipQf_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.32 q9, q8 @@ -203,7 +203,7 @@ define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vzip.8 d17, d16 @@ -220,7 +220,7 @@ define <16 x i8> @vzipi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_undef_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.8 d16, d17 @@ -235,7 +235,7 @@ define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.8 q9, q8 @@ -253,7 +253,7 @@ define <32 x i8> @vzipQi8_undef_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8_undef_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.8 q9, q8 @@ -268,7 +268,7 @@ define <8 x i16> @vzip_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vzip_lower_shufflemask_undef: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.16 d16, d17 @@ -287,7 +287,7 @@ ; as a vtrn. 
define <8 x i16> @vzip_lower_shufflemask_undef_rev(<4 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vzip_lower_shufflemask_undef_rev: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d19, [r0] ; CHECK-NEXT: vtrn.16 d19, d16 @@ -303,7 +303,7 @@ define <4 x i32> @vzip_lower_shufflemask_zeroed(<2 x i32>* %A) { ; CHECK-LABEL: vzip_lower_shufflemask_zeroed: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] ; CHECK-NEXT: vzip.32 q8, q9 @@ -318,7 +318,7 @@ define <4 x i32> @vzip_lower_shufflemask_vuzp(<2 x i32>* %A) { ; CHECK-LABEL: vzip_lower_shufflemask_vuzp: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] ; CHECK-NEXT: vzip.32 q8, q9 @@ -333,7 +333,7 @@ define void @vzip_undef_rev_shufflemask_vtrn(<2 x i32>* %A, <4 x i32>* %B) { ; CHECK-LABEL: vzip_undef_rev_shufflemask_vtrn: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr q9, q8, q8 ; CHECK-NEXT: vzip.32 q8, q9 @@ -349,7 +349,7 @@ define void @vzip_vext_factor(<8 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vzip_vext_factor: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vext.16 d18, d16, d17, #1 ; CHECK-NEXT: vext.16 d16, d18, d17, #2 @@ -365,7 +365,7 @@ define <8 x i8> @vdup_zip(i8* nocapture readonly %x, i8* nocapture readonly %y) { ; CHECK-LABEL: vdup_zip: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vld1.8 {d16[]}, [r1] ; CHECK-NEXT: vld1.8 {d17[]}, [r0] ; CHECK-NEXT: vzip.8 d17, d16 Index: llvm/trunk/test/CodeGen/AVR/atomics/fence.ll =================================================================== --- llvm/trunk/test/CodeGen/AVR/atomics/fence.ll +++ llvm/trunk/test/CodeGen/AVR/atomics/fence.ll @@ -4,7 +4,7 @@ ; AVR is always singlethreaded so fences do nothing. ; CHECK_LABEL: atomic_fence8 -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: ret define void @atomic_fence8() { fence acquire Index: llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll =================================================================== --- llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll +++ llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll @@ -9,18 +9,18 @@ ; ; This issue manifests in a CFG that looks something like this: ; -; BB#2: derived from LLVM BB %finish -; Predecessors according to CFG: BB#0 BB#1 -; %0 = PHI %3, <BB#0>, %5, <BB#1> +; %bb.2: derived from LLVM BB %finish +; Predecessors according to CFG: %bb.0 %bb.1 +; %0 = PHI %3, <%bb.0>, %5, <%bb.1> ; %7 = LDIRdK 2 ; %8 = LDIRdK 1 ; CPRdRr %2, %0, %SREG -; BREQk <BB#6>, %SREG -; Successors according to CFG: BB#5(?%) BB#6(?%) +; BREQk <%bb.6>, %SREG +; Successors according to CFG: %bb.5(?%) %bb.6(?%) ; -; The code assumes it the fallthrough block after this is BB#5, but -; it's actually BB#3! To be proper, there should be an unconditional -; jump tying this block to BB#5. +; The code assumes it the fallthrough block after this is %bb.5, but +; it's actually %bb.3! To be proper, there should be an unconditional +; jump tying this block to %bb.5. define i8 @select_must_add_unconditional_jump(i8 %arg0, i8 %arg1) unnamed_addr { entry-block: @@ -49,10 +49,10 @@ ; basic block containing `select` needs to contain explicit jumps to ; both successors.
-; CHECK: BB#2: derived from LLVM BB %finish -; CHECK: BREQk <[[BRANCHED:BB#[0-9]+]]> -; CHECK: RJMPk <[[DIRECT:BB#[0-9]+]]> +; CHECK: %bb.2: derived from LLVM BB %finish +; CHECK: BREQk <[[BRANCHED:%bb.[0-9]+]]> +; CHECK: RJMPk <[[DIRECT:%bb.[0-9]+]]> ; CHECK: Successors according to CFG ; CHECK-SAME-DAG: {{.*}}[[BRANCHED]] ; CHECK-SAME-DAG: {{.*}}[[DIRECT]] -; CHECK: BB#3: derived from LLVM BB +; CHECK: %bb.3: derived from LLVM BB Index: llvm/trunk/test/CodeGen/Generic/MachineBranchProb.ll =================================================================== --- llvm/trunk/test/CodeGen/Generic/MachineBranchProb.ll +++ llvm/trunk/test/CodeGen/Generic/MachineBranchProb.ll @@ -21,14 +21,14 @@ i64 5, label %sw.bb1 i64 15, label %sw.bb ], !prof !0 -; CHECK: BB#0: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}92.17%) BB#4({{[0-9a-fx/= ]+}}7.83%) -; CHECK: BB#4: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#2({{[0-9a-fx/= ]+}}75.29%) BB#5({{[0-9a-fx/= ]+}}24.71%) -; CHECK: BB#5: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}47.62%) BB#6({{[0-9a-fx/= ]+}}52.38%) -; CHECK: BB#6: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}36.36%) BB#3({{[0-9a-fx/= ]+}}63.64%) +; CHECK: %bb.0: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}92.17%) %bb.4({{[0-9a-fx/= ]+}}7.83%) +; CHECK: %bb.4: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.2({{[0-9a-fx/= ]+}}75.29%) %bb.5({{[0-9a-fx/= ]+}}24.71%) +; CHECK: %bb.5: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}47.62%) %bb.6({{[0-9a-fx/= ]+}}52.38%) +; CHECK: %bb.6: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}36.36%) %bb.3({{[0-9a-fx/= ]+}}63.64%) sw.bb: ; this call will prevent simplifyCFG from optimizing the block away in ARM/AArch64. @@ -70,9 +70,9 @@ ; right with weight 20. ; ; CHECK-LABEL: Machine code for function left_leaning_weight_balanced_tree: -; CHECK: BB#0: derived from LLVM BB %entry +; CHECK: %bb.0: derived from LLVM BB %entry ; CHECK-NOT: Successors -; CHECK: Successors according to CFG: BB#8({{[0-9a-fx/= ]+}}39.71%) BB#9({{[0-9a-fx/= ]+}}60.29%) +; CHECK: Successors according to CFG: %bb.8({{[0-9a-fx/= ]+}}39.71%) %bb.9({{[0-9a-fx/= ]+}}60.29%) } !1 = !{!"branch_weights", Index: llvm/trunk/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir =================================================================== --- llvm/trunk/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir +++ llvm/trunk/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir @@ -11,7 +11,7 @@ # then created code, where the first predicated instruction has incorrect # implicit use of r0: # -# BB#0: +# %bb.0: # Live Ins: %R0 # %R1 = A2_sxth %R0 ; hoisted, kills r0 # A2_nop %P0 Index: llvm/trunk/test/CodeGen/Hexagon/hwloop-redef-imm.mir =================================================================== --- llvm/trunk/test/CodeGen/Hexagon/hwloop-redef-imm.mir +++ llvm/trunk/test/CodeGen/Hexagon/hwloop-redef-imm.mir @@ -8,10 +8,10 @@ # loop setup in the preheader). 
# CHECK: [[R0:%[0-9]+]]:intregs = A2_tfrsi 1920 -# CHECK: J2_loop0r %bb.1.b1, [[R0]] +# CHECK: J2_loop0r %bb.1, [[R0]] # # CHECK: bb.1.b1 (address-taken): -# CHECK: ENDLOOP0 %bb.1.b1 +# CHECK: ENDLOOP0 %bb.1 --- | Index: llvm/trunk/test/CodeGen/Hexagon/ifcvt-edge-weight.ll =================================================================== --- llvm/trunk/test/CodeGen/Hexagon/ifcvt-edge-weight.ll +++ llvm/trunk/test/CodeGen/Hexagon/ifcvt-edge-weight.ll @@ -1,8 +1,8 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -hexagon-eif=0 -print-machineinstrs=if-converter %s -o /dev/null 2>&1 | FileCheck %s ; Check that the edge weights are updated correctly after if-conversion. -; CHECK: BB#3: -; CHECK: Successors according to CFG: BB#2({{[0-9a-fx/= ]+}}10.00%) BB#1({{[0-9a-fx/= ]+}}90.00%) +; CHECK: %bb.3: +; CHECK: Successors according to CFG: %bb.2({{[0-9a-fx/= ]+}}10.00%) %bb.1({{[0-9a-fx/= ]+}}90.00%) @a = external global i32 @d = external global i32 Index: llvm/trunk/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir +++ llvm/trunk/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir @@ -30,33 +30,33 @@ - { reg: '%edi' } - { reg: '%esi' } # CHECK: frameInfo: -# CHECK: savePoint: '%bb.2.true' -# CHECK-NEXT: restorePoint: '%bb.2.true' +# CHECK: savePoint: '%bb.2' +# CHECK-NEXT: restorePoint: '%bb.2' # CHECK: stack frameInfo: maxAlignment: 4 hasCalls: true - savePoint: '%bb.2.true' - restorePoint: '%bb.2.true' + savePoint: '%bb.2' + restorePoint: '%bb.2' stack: - { id: 0, name: tmp, offset: 0, size: 4, alignment: 4 } body: | bb.0: - successors: %bb.2.true, %bb.1 + successors: %bb.2, %bb.1 liveins: %edi, %esi %eax = COPY %edi CMP32rr %eax, killed %esi, implicit-def %eflags - JL_1 %bb.2.true, implicit killed %eflags + JL_1 %bb.2, implicit killed %eflags bb.1: - successors: %bb.3.false + successors: %bb.3 liveins: %eax - JMP_1 %bb.3.false + JMP_1 %bb.3 bb.2.true: - successors: %bb.3.false + successors: %bb.3 liveins: %eax MOV32mr %stack.0.tmp, 1, _, 0, _, killed %eax Index: llvm/trunk/test/CodeGen/MIR/X86/implicit-register-flag.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/X86/implicit-register-flag.mir +++ llvm/trunk/test/CodeGen/MIR/X86/implicit-register-flag.mir @@ -31,11 +31,11 @@ name: foo body: | bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 ; CHECK: CMP32ri8 %edi, 10, implicit-def %eflags - ; CHECK-NEXT: JG_1 %bb.2.exit, implicit %eflags + ; CHECK-NEXT: JG_1 %bb.2, implicit %eflags CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit %eflags + JG_1 %bb.2, implicit %eflags bb.1.less: ; CHECK: %eax = MOV32r0 implicit-def %eflags Index: llvm/trunk/test/CodeGen/MIR/X86/jump-table-info.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/X86/jump-table-info.mir +++ llvm/trunk/test/CodeGen/MIR/X86/jump-table-info.mir @@ -61,23 +61,23 @@ # CHECK-NEXT: kind: label-difference32 # CHECK-NEXT: entries: # CHECK-NEXT: - id: 0 -# CHECK-NEXT: blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ] +# CHECK-NEXT: blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ] # CHECK-NEXT: body: jumpTable: kind: label-difference32 entries: - id: 0 - blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ] + blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ] body: | bb.0.entry: - successors: %bb.2.def, %bb.1.entry + 
successors: %bb.2, %bb.1 %eax = MOV32rr %edi, implicit-def %rax CMP32ri8 %edi, 3, implicit-def %eflags - JA_1 %bb.2.def, implicit %eflags + JA_1 %bb.2, implicit %eflags bb.1.entry: - successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4 + successors: %bb.3, %bb.4, %bb.5, %bb.6 ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg %rcx = LEA64r %rip, 1, _, %jump-table.0, _ %rax = MOVSX64rm32 %rcx, 4, %rax, 0, _ @@ -110,17 +110,17 @@ kind: label-difference32 entries: - id: 1 - blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ] + blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ] body: | bb.0.entry: - successors: %bb.2.def, %bb.1.entry + successors: %bb.2, %bb.1 %eax = MOV32rr %edi, implicit-def %rax CMP32ri8 %edi, 3, implicit-def %eflags - JA_1 %bb.2.def, implicit %eflags + JA_1 %bb.2, implicit %eflags bb.1.entry: - successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4 + successors: %bb.3, %bb.4, %bb.5, %bb.6 ; Verify that the printer will use an id of 0 for this jump table: ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg %rcx = LEA64r %rip, 1, _, %jump-table.1, _ Index: llvm/trunk/test/CodeGen/MIR/X86/machine-basic-block-operands.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/X86/machine-basic-block-operands.mir +++ llvm/trunk/test/CodeGen/MIR/X86/machine-basic-block-operands.mir @@ -36,13 +36,13 @@ body: | ; CHECK: bb.0.entry bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 %eax = MOV32rm %rdi, 1, _, 0, _ ; CHECK: CMP32ri8 %eax, 10 - ; CHECK-NEXT: JG_1 %bb.2.exit + ; CHECK-NEXT: JG_1 %bb.2 CMP32ri8 %eax, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit %eflags + JG_1 %bb.2, implicit %eflags ; CHECK: bb.1.less: bb.1.less: Index: llvm/trunk/test/CodeGen/MIR/X86/newline-handling.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/X86/newline-handling.mir +++ llvm/trunk/test/CodeGen/MIR/X86/newline-handling.mir @@ -35,10 +35,10 @@ # CHECK-LABEL: name: foo # CHECK: body: | # CHECK-NEXT: bb.0.entry: -# CHECK-NEXT: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000) +# CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) # CHECK-NEXT: liveins: %edi # CHECK: CMP32ri8 %edi, 10, implicit-def %eflags -# CHECK-NEXT: JG_1 %bb.2.exit, implicit killed %eflags +# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags # CHECK: bb.1.less: # CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags @@ -50,13 +50,13 @@ # CHECK-NEXT: RETQ killed %eax body: | bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 liveins: %edi CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + JG_1 %bb.2, implicit killed %eflags bb.1.less: @@ -79,10 +79,10 @@ # CHECK-LABEL: name: bar # CHECK: body: | # CHECK-NEXT: bb.0.entry: -# CHECK-NEXT: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000) +# CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) # CHECK-NEXT: liveins: %edi # CHECK: CMP32ri8 %edi, 10, implicit-def %eflags -# CHECK-NEXT: JG_1 %bb.2.exit, implicit killed %eflags +# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags # CHECK: bb.1.less: # CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags @@ -95,10 +95,10 @@ body: | bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 liveins: %edi CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + JG_1 %bb.2, implicit killed %eflags bb.1.less: %eax = MOV32r0 implicit-def 
dead %eflags RETQ killed %eax Index: llvm/trunk/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir +++ llvm/trunk/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir @@ -21,14 +21,14 @@ name: foo body: | ; CHECK-LABEL: bb.0.entry: - ; CHECK: successors: %bb.1.less(0x2a3d70a4), %bb.2.exit(0x55c28f5c) + ; CHECK: successors: %bb.1(0x2a3d70a4), %bb.2(0x55c28f5c) ; CHECK-LABEL: bb.1.less: bb.0.entry: - successors: %bb.1.less (33), %bb.2.exit(67) + successors: %bb.1 (33), %bb.2(67) liveins: %edi CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + JG_1 %bb.2, implicit killed %eflags bb.1.less: %eax = MOV32r0 implicit-def dead %eflags Index: llvm/trunk/test/CodeGen/MSP430/BranchSelector.ll =================================================================== --- llvm/trunk/test/CodeGen/MSP430/BranchSelector.ll +++ llvm/trunk/test/CodeGen/MSP430/BranchSelector.ll @@ -579,7 +579,7 @@ ; This branch should not be expanded ; CHECK-LABEL: .LBB1_1: ; CHECK: jeq .LBB1_1 -; CHECK: BB#2: +; CHECK: %bb.2: ; CHECK: ret br i1 %lnot, label %begin, label %end Index: llvm/trunk/test/CodeGen/Mips/compactbranches/empty-block.mir =================================================================== --- llvm/trunk/test/CodeGen/Mips/compactbranches/empty-block.mir +++ llvm/trunk/test/CodeGen/Mips/compactbranches/empty-block.mir @@ -5,11 +5,11 @@ # CHECK: blezc # CHECK: nop -# CHECK: # BB#1: +# CHECK: # %bb.1: # CHECK: .insn -# CHECK: # BB#2: +# CHECK: # %bb.2: # CHECK: .insn -# CHECK: # BB#3: +# CHECK: # %bb.3: # CHECK: jal --- | Index: llvm/trunk/test/CodeGen/Mips/lcb4a.ll =================================================================== --- llvm/trunk/test/CodeGen/Mips/lcb4a.ll +++ llvm/trunk/test/CodeGen/Mips/lcb4a.ll @@ -26,7 +26,7 @@ } ; ci: beqz $3, $BB0_2 -; ci: # BB#1: # %if.else +; ci: # %bb.1: # %if.else ; Function Attrs: nounwind optsize Index: llvm/trunk/test/CodeGen/Mips/prevent-hoisting.ll =================================================================== --- llvm/trunk/test/CodeGen/Mips/prevent-hoisting.ll +++ llvm/trunk/test/CodeGen/Mips/prevent-hoisting.ll @@ -16,7 +16,7 @@ ; CHECK: sll ; Check that at the start of a fallthrough block there is a instruction that writes to $1. 
-; CHECK: {{BB[0-9_#]+}}: +; CHECK: {{%bb.[0-9]+}}: ; CHECK: sll $1, $[[R0:[0-9]+]], 4 ; CHECK: lw $[[R1:[0-9]+]], %got(assignSE2partition)($[[R2:[0-9]+]]) Index: llvm/trunk/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll +++ llvm/trunk/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll @@ -6,7 +6,7 @@ define i32 @test(i32 %i) { ; CHECK-LABEL: test: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: addis 4, 2, .LC0@toc@ha ; CHECK-NEXT: extsw 3, 3 ; CHECK-NEXT: addis 5, 2, .LC1@toc@ha Index: llvm/trunk/test/CodeGen/PowerPC/addegluecrash.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/addegluecrash.ll +++ llvm/trunk/test/CodeGen/PowerPC/addegluecrash.ll @@ -5,7 +5,7 @@ define void @bn_mul_comba8(i64* nocapture %r, i64* nocapture readonly %a, i64* nocapture readonly %b) { ; CHECK-LABEL: bn_mul_comba8: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: ld 6, 0(4) ; CHECK-NEXT: ld 7, 0(5) ; CHECK-NEXT: mulhdu 8, 7, 6 Index: llvm/trunk/test/CodeGen/PowerPC/andc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/andc.ll +++ llvm/trunk/test/CodeGen/PowerPC/andc.ll @@ -3,7 +3,7 @@ define i1 @and_cmp1(i32 %x, i32 %y) { ; CHECK-LABEL: and_cmp1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: andc 3, 4, 3 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31 @@ -15,7 +15,7 @@ define i1 @and_cmp_const(i32 %x) { ; CHECK-LABEL: and_cmp_const: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 4, 43 ; CHECK-NEXT: andc 3, 4, 3 ; CHECK-NEXT: cntlzw 3, 3 @@ -28,7 +28,7 @@ define i1 @foo(i32 %i) { ; CHECK-LABEL: foo: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lis 4, 4660 ; CHECK-NEXT: ori 4, 4, 22136 ; CHECK-NEXT: andc 3, 4, 3 @@ -42,7 +42,7 @@ define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) { ; CHECK-LABEL: hidden_not_v4i32: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 6 ; CHECK-NEXT: xxlandc 34, 35, 34 ; CHECK-NEXT: blr Index: llvm/trunk/test/CodeGen/PowerPC/atomics-constant.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/atomics-constant.ll +++ llvm/trunk/test/CodeGen/PowerPC/atomics-constant.ll @@ -7,7 +7,7 @@ define i64 @foo() { ; CHECK-LABEL: foo: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis 3, 2, .LC0@toc@ha ; CHECK-NEXT: li 4, 0 ; CHECK-NEXT: ld 3, .LC0@toc@l(3) Index: llvm/trunk/test/CodeGen/PowerPC/atomics-regression.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/atomics-regression.ll +++ llvm/trunk/test/CodeGen/PowerPC/atomics-regression.ll @@ -3,7 +3,7 @@ define i8 @test0(i8* %ptr) { ; PPC64LE-LABEL: test0: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lbz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i8, i8* %ptr unordered, align 1 @@ -12,7 +12,7 @@ define i8 @test1(i8* %ptr) { ; PPC64LE-LABEL: test1: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lbz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i8, i8* %ptr monotonic, align 1 @@ -21,7 +21,7 @@ define i8 @test2(i8* %ptr) { ; PPC64LE-LABEL: test2: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lbz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -33,7 +33,7 @@ define i8 @test3(i8* %ptr) { ; PPC64LE-LABEL: test3: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: 
; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: lbz 3, 0(3) @@ -47,7 +47,7 @@ define i16 @test4(i16* %ptr) { ; PPC64LE-LABEL: test4: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lhz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i16, i16* %ptr unordered, align 2 @@ -56,7 +56,7 @@ define i16 @test5(i16* %ptr) { ; PPC64LE-LABEL: test5: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lhz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i16, i16* %ptr monotonic, align 2 @@ -65,7 +65,7 @@ define i16 @test6(i16* %ptr) { ; PPC64LE-LABEL: test6: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lhz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -77,7 +77,7 @@ define i16 @test7(i16* %ptr) { ; PPC64LE-LABEL: test7: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: lhz 3, 0(3) @@ -91,7 +91,7 @@ define i32 @test8(i32* %ptr) { ; PPC64LE-LABEL: test8: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i32, i32* %ptr unordered, align 4 @@ -100,7 +100,7 @@ define i32 @test9(i32* %ptr) { ; PPC64LE-LABEL: test9: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i32, i32* %ptr monotonic, align 4 @@ -109,7 +109,7 @@ define i32 @test10(i32* %ptr) { ; PPC64LE-LABEL: test10: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -121,7 +121,7 @@ define i32 @test11(i32* %ptr) { ; PPC64LE-LABEL: test11: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: lwz 3, 0(3) @@ -135,7 +135,7 @@ define i64 @test12(i64* %ptr) { ; PPC64LE-LABEL: test12: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: ld 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i64, i64* %ptr unordered, align 8 @@ -144,7 +144,7 @@ define i64 @test13(i64* %ptr) { ; PPC64LE-LABEL: test13: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: ld 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i64, i64* %ptr monotonic, align 8 @@ -153,7 +153,7 @@ define i64 @test14(i64* %ptr) { ; PPC64LE-LABEL: test14: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: ld 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -165,7 +165,7 @@ define i64 @test15(i64* %ptr) { ; PPC64LE-LABEL: test15: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: ld 3, 0(3) @@ -179,7 +179,7 @@ define void @test16(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test16: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr store atomic i8 %val, i8* %ptr unordered, align 1 @@ -188,7 +188,7 @@ define void @test17(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test17: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr store atomic i8 %val, i8* %ptr monotonic, align 1 @@ -197,7 +197,7 @@ define void @test18(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test18: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr @@ -207,7 +207,7 @@ define void @test19(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test19: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr @@ -217,7 +217,7 @@ define void @test20(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test20: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sth 4, 
0(3) ; PPC64LE-NEXT: blr store atomic i16 %val, i16* %ptr unordered, align 2 @@ -226,7 +226,7 @@ define void @test21(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test21: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sth 4, 0(3) ; PPC64LE-NEXT: blr store atomic i16 %val, i16* %ptr monotonic, align 2 @@ -235,7 +235,7 @@ define void @test22(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test22: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: sth 4, 0(3) ; PPC64LE-NEXT: blr @@ -245,7 +245,7 @@ define void @test23(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test23: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: sth 4, 0(3) ; PPC64LE-NEXT: blr @@ -255,7 +255,7 @@ define void @test24(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test24: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr store atomic i32 %val, i32* %ptr unordered, align 4 @@ -264,7 +264,7 @@ define void @test25(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test25: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr store atomic i32 %val, i32* %ptr monotonic, align 4 @@ -273,7 +273,7 @@ define void @test26(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test26: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr @@ -283,7 +283,7 @@ define void @test27(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test27: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr @@ -293,7 +293,7 @@ define void @test28(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test28: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr store atomic i64 %val, i64* %ptr unordered, align 8 @@ -302,7 +302,7 @@ define void @test29(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test29: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr store atomic i64 %val, i64* %ptr monotonic, align 8 @@ -311,7 +311,7 @@ define void @test30(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test30: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr @@ -321,7 +321,7 @@ define void @test31(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test31: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr @@ -331,7 +331,7 @@ define void @test32() { ; PPC64LE-LABEL: test32: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence acquire @@ -340,7 +340,7 @@ define void @test33() { ; PPC64LE-LABEL: test33: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence release @@ -349,7 +349,7 @@ define void @test34() { ; PPC64LE-LABEL: test34: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence acq_rel @@ -358,7 +358,7 @@ define void @test35() { ; PPC64LE-LABEL: test35: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: blr fence seq_cst @@ -367,7 +367,7 @@ define void @test36() { ; PPC64LE-LABEL: test36: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence syncscope("singlethread") acquire @@ -376,7 +376,7 @@ define void @test37() { ; PPC64LE-LABEL: test37: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence syncscope("singlethread") release @@ -385,7 +385,7 @@ define void @test38() { ; PPC64LE-LABEL: test38: -; PPC64LE: # 
BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence syncscope("singlethread") acq_rel @@ -394,7 +394,7 @@ define void @test39() { ; PPC64LE-LABEL: test39: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: blr fence syncscope("singlethread") seq_cst @@ -403,7 +403,7 @@ define void @test40(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test40: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB40_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB40_1: @@ -413,7 +413,7 @@ ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB40_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic @@ -422,15 +422,15 @@ define void @test41(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test41: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB41_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB41_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB41_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB41_4: @@ -443,15 +443,15 @@ define void @test42(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test42: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB42_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB42_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB42_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB42_4: @@ -464,7 +464,7 @@ define void @test43(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test43: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB43_2 ; PPC64LE-NEXT: .p2align 5 @@ -475,7 +475,7 @@ ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB43_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic @@ -484,7 +484,7 @@ define void @test44(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test44: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB44_2 ; PPC64LE-NEXT: .p2align 5 @@ -495,7 +495,7 @@ ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB44_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire @@ -504,16 +504,16 @@ define void @test45(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test45: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB45_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB45_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB45_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB45_4: @@ -526,16 +526,16 @@ define void @test46(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test46: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB46_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB46_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB46_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB46_4: @@ -548,16 +548,16 @@ define void @test47(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test47: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB47_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB47_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB47_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB47_4: @@ -570,16 +570,16 @@ define void @test48(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test48: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB48_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB48_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB48_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB48_4: @@ -592,16 +592,16 @@ define void @test49(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test49: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB49_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB49_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB49_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB49_4: @@ -614,7 +614,7 @@ define void @test50(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test50: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB50_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB50_1: @@ -624,7 +624,7 @@ ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB50_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic @@ -633,15 +633,15 @@ define void @test51(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test51: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB51_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB51_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB51_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB51_4: @@ -654,15 +654,15 @@ define void @test52(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test52: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB52_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB52_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB52_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB52_4: @@ -675,7 +675,7 @@ define void @test53(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test53: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB53_2 ; PPC64LE-NEXT: .p2align 5 @@ -686,7 +686,7 @@ ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB53_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic @@ -695,7 +695,7 @@ define void @test54(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test54: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB54_2 ; PPC64LE-NEXT: .p2align 5 @@ -706,7 +706,7 @@ ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB54_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire @@ -715,16 +715,16 @@ define void @test55(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test55: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB55_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB55_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB55_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB55_4: @@ -737,16 +737,16 @@ define void @test56(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test56: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB56_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB56_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB56_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB56_4: @@ -759,16 +759,16 @@ define void @test57(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test57: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB57_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB57_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB57_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB57_4: @@ -781,16 +781,16 @@ define void @test58(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test58: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB58_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB58_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB58_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB58_4: @@ -803,16 +803,16 @@ define void @test59(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test59: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB59_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB59_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB59_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB59_4: @@ -825,7 +825,7 @@ define void @test60(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test60: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB60_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB60_1: @@ -835,7 +835,7 @@ ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB60_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic @@ -844,15 +844,15 @@ define void @test61(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test61: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB61_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB61_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB61_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB61_4: @@ -865,15 +865,15 @@ define void @test62(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test62: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB62_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB62_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB62_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB62_4: @@ -886,7 +886,7 @@ define void @test63(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test63: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB63_2 ; PPC64LE-NEXT: .p2align 5 @@ -897,7 +897,7 @@ ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB63_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic @@ -906,7 +906,7 @@ define void @test64(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test64: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB64_2 ; PPC64LE-NEXT: .p2align 5 @@ -917,7 +917,7 @@ ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB64_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire @@ -926,16 +926,16 @@ define void @test65(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test65: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB65_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB65_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB65_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB65_4: @@ -948,16 +948,16 @@ define void @test66(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test66: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB66_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB66_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB66_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB66_4: @@ -970,16 +970,16 @@ define void @test67(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test67: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB67_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB67_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB67_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB67_4: @@ -992,16 +992,16 @@ define void @test68(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test68: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB68_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB68_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB68_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB68_4: @@ -1014,16 +1014,16 @@ define void @test69(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test69: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB69_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB69_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB69_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB69_4: @@ -1036,7 +1036,7 @@ define void @test70(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test70: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB70_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB70_1: @@ -1046,7 +1046,7 @@ ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB70_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic @@ -1055,15 +1055,15 @@ define void @test71(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test71: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB71_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB71_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB71_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB71_4: @@ -1076,15 +1076,15 @@ define void @test72(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test72: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB72_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB72_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB72_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB72_4: @@ -1097,7 +1097,7 @@ define void @test73(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test73: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB73_2 ; PPC64LE-NEXT: .p2align 5 @@ -1108,7 +1108,7 @@ ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB73_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic @@ -1117,7 +1117,7 @@ define void @test74(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test74: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB74_2 ; PPC64LE-NEXT: .p2align 5 @@ -1128,7 +1128,7 @@ ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB74_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire @@ -1137,16 +1137,16 @@ define void @test75(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test75: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB75_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB75_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB75_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB75_4: @@ -1159,16 +1159,16 @@ define void @test76(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test76: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB76_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB76_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB76_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB76_4: @@ -1181,16 +1181,16 @@ define void @test77(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test77: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB77_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB77_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB77_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB77_4: @@ -1203,16 +1203,16 @@ define void @test78(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test78: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB78_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB78_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB78_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB78_4: @@ -1225,16 +1225,16 @@ define void @test79(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test79: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB79_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB79_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB79_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB79_4: @@ -1247,7 +1247,7 @@ define void @test80(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test80: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB80_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB80_1: @@ -1257,7 +1257,7 @@ ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB80_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") monotonic monotonic @@ -1266,15 +1266,15 @@ define void @test81(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test81: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB81_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB81_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB81_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB81_4: @@ -1287,15 +1287,15 @@ define void @test82(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test82: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB82_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB82_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB82_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB82_4: @@ -1308,7 +1308,7 @@ define void @test83(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test83: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB83_2 ; PPC64LE-NEXT: .p2align 5 @@ -1319,7 +1319,7 @@ ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB83_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release monotonic @@ -1328,7 +1328,7 @@ define void @test84(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test84: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB84_2 ; PPC64LE-NEXT: .p2align 5 @@ -1339,7 +1339,7 @@ ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB84_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release acquire @@ -1348,16 +1348,16 @@ define void @test85(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test85: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB85_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB85_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB85_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB85_4: @@ -1370,16 +1370,16 @@ define void @test86(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test86: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB86_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB86_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB86_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB86_4: @@ -1392,16 +1392,16 @@ define void @test87(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test87: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB87_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB87_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB87_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB87_4: @@ -1414,16 +1414,16 @@ define void @test88(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test88: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB88_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB88_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB88_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB88_4: @@ -1436,16 +1436,16 @@ define void @test89(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test89: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB89_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB89_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB89_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB89_4: @@ -1458,7 +1458,7 @@ define void @test90(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test90: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB90_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB90_1: @@ -1468,7 +1468,7 @@ ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB90_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") monotonic monotonic @@ -1477,15 +1477,15 @@ define void @test91(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test91: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB91_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB91_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB91_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB91_4: @@ -1498,15 +1498,15 @@ define void @test92(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test92: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB92_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB92_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB92_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB92_4: @@ -1519,7 +1519,7 @@ define void @test93(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test93: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB93_2 ; PPC64LE-NEXT: .p2align 5 @@ -1530,7 +1530,7 @@ ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB93_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release monotonic @@ -1539,7 +1539,7 @@ define void @test94(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test94: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB94_2 ; PPC64LE-NEXT: .p2align 5 @@ -1550,7 +1550,7 @@ ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB94_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release acquire @@ -1559,16 +1559,16 @@ define void @test95(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test95: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB95_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB95_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB95_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB95_4: @@ -1581,16 +1581,16 @@ define void @test96(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test96: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB96_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB96_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB96_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB96_4: @@ -1603,16 +1603,16 @@ define void @test97(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test97: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB97_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB97_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB97_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB97_4: @@ -1625,16 +1625,16 @@ define void @test98(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test98: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB98_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB98_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB98_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB98_4: @@ -1647,16 +1647,16 @@ define void @test99(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test99: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB99_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB99_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB99_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB99_4: @@ -1669,7 +1669,7 @@ define void @test100(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test100: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB100_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB100_1: @@ -1679,7 +1679,7 @@ ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB100_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") monotonic monotonic @@ -1688,15 +1688,15 @@ define void @test101(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test101: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB101_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB101_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB101_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB101_4: @@ -1709,15 +1709,15 @@ define void @test102(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test102: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB102_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB102_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB102_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB102_4: @@ -1730,7 +1730,7 @@ define void @test103(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test103: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB103_2 ; PPC64LE-NEXT: .p2align 5 @@ -1741,7 +1741,7 @@ ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB103_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release monotonic @@ -1750,7 +1750,7 @@ define void @test104(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test104: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB104_2 ; PPC64LE-NEXT: .p2align 5 @@ -1761,7 +1761,7 @@ ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB104_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release acquire @@ -1770,16 +1770,16 @@ define void @test105(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test105: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB105_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB105_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB105_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB105_4: @@ -1792,16 +1792,16 @@ define void @test106(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test106: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB106_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB106_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB106_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB106_4: @@ -1814,16 +1814,16 @@ define void @test107(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test107: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB107_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB107_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB107_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB107_4: @@ -1836,16 +1836,16 @@ define void @test108(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test108: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB108_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB108_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB108_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB108_4: @@ -1858,16 +1858,16 @@ define void @test109(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test109: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB109_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB109_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB109_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB109_4: @@ -1880,7 +1880,7 @@ define void @test110(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test110: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB110_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB110_1: @@ -1890,7 +1890,7 @@ ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB110_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") monotonic monotonic @@ -1899,15 +1899,15 @@ define void @test111(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test111: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB111_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB111_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB111_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB111_4: @@ -1920,15 +1920,15 @@ define void @test112(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test112: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB112_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB112_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB112_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB112_4: @@ -1941,7 +1941,7 @@ define void @test113(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test113: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB113_2 ; PPC64LE-NEXT: .p2align 5 @@ -1952,7 +1952,7 @@ ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB113_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release monotonic @@ -1961,7 +1961,7 @@ define void @test114(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test114: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB114_2 ; PPC64LE-NEXT: .p2align 5 @@ -1972,7 +1972,7 @@ ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB114_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release acquire @@ -1981,16 +1981,16 @@ define void @test115(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test115: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB115_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB115_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB115_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB115_4: @@ -2003,16 +2003,16 @@ define void @test116(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test116: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB116_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB116_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB116_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB116_4: @@ -2025,16 +2025,16 @@ define void @test117(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test117: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB117_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB117_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB117_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB117_4: @@ -2047,16 +2047,16 @@ define void @test118(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test118: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB118_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB118_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB118_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB118_4: @@ -2069,16 +2069,16 @@ define void @test119(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test119: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB119_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB119_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB119_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB119_4: @@ -2091,12 +2091,12 @@ define i8 @test120(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test120: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB120_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB120_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val monotonic @@ -2105,13 +2105,13 @@ define i8 @test121(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test121: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB121_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB121_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val acquire @@ -2120,13 +2120,13 @@ define i8 @test122(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test122: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB122_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB122_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val release @@ -2135,13 +2135,13 @@ define i8 @test123(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test123: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB123_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB123_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2151,13 +2151,13 @@ define i8 @test124(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test124: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB124_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB124_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2167,12 +2167,12 @@ define i16 @test125(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test125: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB125_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB125_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val monotonic @@ -2181,13 +2181,13 @@ define i16 @test126(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test126: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB126_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB126_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val acquire @@ -2196,13 +2196,13 @@ define i16 @test127(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test127: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB127_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB127_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val release @@ -2211,13 +2211,13 @@ define i16 @test128(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test128: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB128_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB128_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2227,13 +2227,13 @@ define i16 @test129(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test129: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB129_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB129_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2243,12 +2243,12 @@ define i32 @test130(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test130: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB130_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB130_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val monotonic @@ -2257,13 +2257,13 @@ define i32 @test131(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test131: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB131_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB131_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val acquire @@ -2272,13 +2272,13 @@ define i32 @test132(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test132: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB132_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB132_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val release @@ -2287,13 +2287,13 @@ define i32 @test133(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test133: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB133_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB133_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2303,13 +2303,13 @@ define i32 @test134(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test134: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB134_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB134_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2319,12 +2319,12 @@ define i64 @test135(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test135: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB135_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB135_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val monotonic @@ -2333,13 +2333,13 @@ define i64 @test136(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test136: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB136_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB136_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val acquire @@ -2348,13 +2348,13 @@ define i64 @test137(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test137: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB137_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB137_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val release @@ -2363,13 +2363,13 @@ define i64 @test138(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test138: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB138_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB138_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2379,13 +2379,13 @@ define i64 @test139(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test139: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB139_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB139_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2395,13 +2395,13 @@ define i8 @test140(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test140: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB140_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB140_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val monotonic @@ -2410,14 +2410,14 @@ define i8 @test141(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test141: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB141_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stbcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB141_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val acquire @@ -2426,14 +2426,14 @@ define i8 @test142(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test142: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB142_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB142_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val release @@ -2442,14 +2442,14 @@ define i8 @test143(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test143: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB143_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB143_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2459,14 +2459,14 @@ define i8 @test144(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test144: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB144_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB144_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2476,13 +2476,13 @@ define i16 @test145(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test145: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB145_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB145_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val monotonic @@ -2491,14 +2491,14 @@ define i16 @test146(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test146: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB146_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB146_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val acquire @@ -2507,14 +2507,14 @@ define i16 @test147(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test147: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB147_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB147_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val release @@ -2523,14 +2523,14 @@ define i16 @test148(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test148: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB148_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB148_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2540,14 +2540,14 @@ define i16 @test149(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test149: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB149_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB149_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2557,13 +2557,13 @@ define i32 @test150(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test150: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB150_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB150_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val monotonic @@ -2572,14 +2572,14 @@ define i32 @test151(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test151: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB151_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB151_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val acquire @@ -2588,14 +2588,14 @@ define i32 @test152(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test152: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB152_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB152_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val release @@ -2604,14 +2604,14 @@ define i32 @test153(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test153: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB153_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB153_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2621,14 +2621,14 @@ define i32 @test154(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test154: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB154_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB154_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2638,13 +2638,13 @@ define i64 @test155(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test155: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB155_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB155_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val monotonic @@ -2653,14 +2653,14 @@ define i64 @test156(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test156: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB156_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB156_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val acquire @@ -2669,14 +2669,14 @@ define i64 @test157(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test157: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB157_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB157_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val release @@ -2685,14 +2685,14 @@ define i64 @test158(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test158: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB158_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB158_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2702,14 +2702,14 @@ define i64 @test159(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test159: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB159_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB159_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2719,13 +2719,13 @@ define i8 @test160(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test160: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB160_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB160_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val monotonic @@ -2734,14 +2734,14 @@ define i8 @test161(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test161: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB161_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB161_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val acquire @@ -2750,14 +2750,14 @@ define i8 @test162(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test162: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB162_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB162_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val release @@ -2766,14 +2766,14 @@ define i8 @test163(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test163: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB163_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB163_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2783,14 +2783,14 @@ define i8 @test164(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test164: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB164_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB164_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2800,13 +2800,13 @@ define i16 @test165(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test165: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB165_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB165_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val monotonic @@ -2815,14 +2815,14 @@ define i16 @test166(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test166: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB166_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB166_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val acquire @@ -2831,14 +2831,14 @@ define i16 @test167(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test167: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB167_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB167_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val release @@ -2847,14 +2847,14 @@ define i16 @test168(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test168: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB168_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB168_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2864,14 +2864,14 @@ define i16 @test169(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test169: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB169_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB169_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2881,13 +2881,13 @@ define i32 @test170(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test170: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB170_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB170_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val monotonic @@ -2896,14 +2896,14 @@ define i32 @test171(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test171: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB171_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB171_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val acquire @@ -2912,14 +2912,14 @@ define i32 @test172(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test172: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB172_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB172_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val release @@ -2928,14 +2928,14 @@ define i32 @test173(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test173: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB173_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB173_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2945,14 +2945,14 @@ define i32 @test174(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test174: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB174_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB174_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2962,13 +2962,13 @@ define i64 @test175(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test175: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB175_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB175_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val monotonic @@ -2977,14 +2977,14 @@ define i64 @test176(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test176: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB176_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: sub 6, 3, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB176_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val acquire @@ -2993,14 +2993,14 @@ define i64 @test177(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test177: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB177_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB177_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val release @@ -3009,14 +3009,14 @@ define i64 @test178(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test178: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB178_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB178_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3026,14 +3026,14 @@ define i64 @test179(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test179: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB179_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB179_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3043,13 +3043,13 @@ define i8 @test180(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test180: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB180_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB180_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val monotonic @@ -3058,14 +3058,14 @@ define i8 @test181(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test181: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB181_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stbcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB181_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val acquire @@ -3074,14 +3074,14 @@ define i8 @test182(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test182: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB182_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB182_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val release @@ -3090,14 +3090,14 @@ define i8 @test183(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test183: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB183_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB183_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3107,14 +3107,14 @@ define i8 @test184(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test184: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB184_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB184_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3124,13 +3124,13 @@ define i16 @test185(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test185: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB185_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB185_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val monotonic @@ -3139,14 +3139,14 @@ define i16 @test186(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test186: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB186_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB186_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val acquire @@ -3155,14 +3155,14 @@ define i16 @test187(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test187: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB187_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB187_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val release @@ -3171,14 +3171,14 @@ define i16 @test188(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test188: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB188_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB188_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3188,14 +3188,14 @@ define i16 @test189(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test189: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB189_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB189_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3205,13 +3205,13 @@ define i32 @test190(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test190: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB190_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB190_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val monotonic @@ -3220,14 +3220,14 @@ define i32 @test191(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test191: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB191_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB191_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val acquire @@ -3236,14 +3236,14 @@ define i32 @test192(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test192: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB192_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB192_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val release @@ -3252,14 +3252,14 @@ define i32 @test193(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test193: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB193_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB193_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3269,14 +3269,14 @@ define i32 @test194(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test194: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB194_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB194_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3286,13 +3286,13 @@ define i64 @test195(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test195: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB195_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB195_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val monotonic @@ -3301,14 +3301,14 @@ define i64 @test196(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test196: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB196_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB196_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val acquire @@ -3317,14 +3317,14 @@ define i64 @test197(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test197: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB197_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB197_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val release @@ -3333,14 +3333,14 @@ define i64 @test198(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test198: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB198_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB198_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3350,14 +3350,14 @@ define i64 @test199(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test199: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB199_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB199_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3367,13 +3367,13 @@ define i8 @test200(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test200: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB200_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB200_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val monotonic @@ -3382,14 +3382,14 @@ define i8 @test201(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test201: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB201_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB201_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val acquire @@ -3398,14 +3398,14 @@ define i8 @test202(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test202: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB202_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB202_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val release @@ -3414,14 +3414,14 @@ define i8 @test203(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test203: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB203_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB203_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3431,14 +3431,14 @@ define i8 @test204(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test204: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB204_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB204_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3448,13 +3448,13 @@ define i16 @test205(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test205: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB205_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB205_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val monotonic @@ -3463,14 +3463,14 @@ define i16 @test206(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test206: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB206_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB206_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val acquire @@ -3479,14 +3479,14 @@ define i16 @test207(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test207: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB207_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB207_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val release @@ -3495,14 +3495,14 @@ define i16 @test208(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test208: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB208_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB208_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3512,14 +3512,14 @@ define i16 @test209(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test209: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB209_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB209_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3529,13 +3529,13 @@ define i32 @test210(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test210: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB210_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB210_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val monotonic @@ -3544,14 +3544,14 @@ define i32 @test211(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test211: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB211_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB211_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val acquire @@ -3560,14 +3560,14 @@ define i32 @test212(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test212: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB212_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB212_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val release @@ -3576,14 +3576,14 @@ define i32 @test213(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test213: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB213_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB213_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3593,14 +3593,14 @@ define i32 @test214(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test214: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB214_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB214_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3610,13 +3610,13 @@ define i64 @test215(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test215: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB215_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB215_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val monotonic @@ -3625,14 +3625,14 @@ define i64 @test216(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test216: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB216_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB216_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val acquire @@ -3641,14 +3641,14 @@ define i64 @test217(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test217: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB217_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB217_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val release @@ -3657,14 +3657,14 @@ define i64 @test218(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test218: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB218_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB218_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3674,14 +3674,14 @@ define i64 @test219(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test219: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB219_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB219_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3691,13 +3691,13 @@ define i8 @test220(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test220: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB220_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB220_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val monotonic @@ -3706,14 +3706,14 @@ define i8 @test221(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test221: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB221_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stbcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB221_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val acquire @@ -3722,14 +3722,14 @@ define i8 @test222(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test222: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB222_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB222_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val release @@ -3738,14 +3738,14 @@ define i8 @test223(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test223: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB223_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB223_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3755,14 +3755,14 @@ define i8 @test224(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test224: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB224_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB224_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3772,13 +3772,13 @@ define i16 @test225(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test225: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB225_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB225_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val monotonic @@ -3787,14 +3787,14 @@ define i16 @test226(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test226: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB226_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB226_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val acquire @@ -3803,14 +3803,14 @@ define i16 @test227(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test227: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB227_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB227_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val release @@ -3819,14 +3819,14 @@ define i16 @test228(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test228: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB228_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB228_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3836,14 +3836,14 @@ define i16 @test229(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test229: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB229_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB229_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3853,13 +3853,13 @@ define i32 @test230(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test230: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB230_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB230_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val monotonic @@ -3868,14 +3868,14 @@ define i32 @test231(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test231: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB231_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB231_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val acquire @@ -3884,14 +3884,14 @@ define i32 @test232(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test232: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB232_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB232_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val release @@ -3900,14 +3900,14 @@ define i32 @test233(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test233: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB233_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB233_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3917,14 +3917,14 @@ define i32 @test234(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test234: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB234_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB234_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3934,13 +3934,13 @@ define i64 @test235(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test235: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB235_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB235_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val monotonic @@ -3949,14 +3949,14 @@ define i64 @test236(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test236: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB236_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB236_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val acquire @@ -3965,14 +3965,14 @@ define i64 @test237(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test237: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB237_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB237_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val release @@ -3981,14 +3981,14 @@ define i64 @test238(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test238: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB238_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB238_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3998,14 +3998,14 @@ define i64 @test239(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test239: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB239_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB239_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4015,13 +4015,13 @@ define i8 @test240(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test240: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB240_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB240_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val monotonic @@ -4030,14 +4030,14 @@ define i8 @test241(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test241: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB241_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB241_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val acquire @@ -4046,14 +4046,14 @@ define i8 @test242(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test242: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB242_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB242_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val release @@ -4062,14 +4062,14 @@ define i8 @test243(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test243: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB243_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB243_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4079,14 +4079,14 @@ define i8 @test244(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test244: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB244_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB244_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4096,13 +4096,13 @@ define i16 @test245(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test245: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB245_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB245_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val monotonic @@ -4111,14 +4111,14 @@ define i16 @test246(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test246: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB246_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB246_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val acquire @@ -4127,14 +4127,14 @@ define i16 @test247(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test247: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB247_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB247_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val release @@ -4143,14 +4143,14 @@ define i16 @test248(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test248: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB248_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB248_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4160,14 +4160,14 @@ define i16 @test249(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test249: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB249_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB249_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4177,13 +4177,13 @@ define i32 @test250(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test250: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB250_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB250_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val monotonic @@ -4192,14 +4192,14 @@ define i32 @test251(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test251: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB251_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB251_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val acquire @@ -4208,14 +4208,14 @@ define i32 @test252(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test252: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB252_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB252_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val release @@ -4224,14 +4224,14 @@ define i32 @test253(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test253: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB253_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB253_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4241,14 +4241,14 @@ define i32 @test254(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test254: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB254_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB254_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4258,13 +4258,13 @@ define i64 @test255(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test255: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB255_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB255_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val monotonic @@ -4273,14 +4273,14 @@ define i64 @test256(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test256: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB256_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB256_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val acquire @@ -4289,14 +4289,14 @@ define i64 @test257(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test257: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB257_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB257_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val release @@ -4305,14 +4305,14 @@ define i64 @test258(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test258: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB258_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB258_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4322,14 +4322,14 @@ define i64 @test259(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test259: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB259_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB259_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4339,13 +4339,13 @@ define i8 @test260(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test260: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB260_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB260_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB260_1 ; PPC64LE-NEXT: .LBB260_3: @@ -4357,14 +4357,14 @@ define i8 @test261(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test261: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB261_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB261_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB261_1 ; PPC64LE-NEXT: .LBB261_3: @@ -4376,14 +4376,14 @@ define i8 @test262(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test262: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB262_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB262_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB262_1 ; PPC64LE-NEXT: .LBB262_3: @@ -4395,14 +4395,14 @@ define i8 @test263(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test263: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB263_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB263_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB263_1 ; PPC64LE-NEXT: .LBB263_3: @@ -4415,14 +4415,14 @@ define i8 @test264(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test264: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB264_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB264_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB264_1 ; PPC64LE-NEXT: .LBB264_3: @@ -4435,13 +4435,13 @@ define i16 @test265(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test265: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB265_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB265_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB265_1 ; PPC64LE-NEXT: .LBB265_3: @@ -4453,14 +4453,14 @@ define i16 @test266(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test266: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB266_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB266_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB266_1 ; PPC64LE-NEXT: .LBB266_3: @@ -4472,14 +4472,14 @@ define i16 @test267(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test267: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB267_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB267_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB267_1 ; PPC64LE-NEXT: .LBB267_3: @@ -4491,14 +4491,14 @@ define i16 @test268(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test268: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB268_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB268_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB268_1 ; PPC64LE-NEXT: .LBB268_3: @@ -4511,14 +4511,14 @@ define i16 @test269(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test269: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB269_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB269_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB269_1 ; PPC64LE-NEXT: .LBB269_3: @@ -4531,12 +4531,12 @@ define i32 @test270(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test270: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB270_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB270_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB270_1 ; PPC64LE-NEXT: .LBB270_3: @@ -4548,13 +4548,13 @@ define i32 @test271(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test271: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB271_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB271_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB271_1 ; PPC64LE-NEXT: .LBB271_3: @@ -4566,13 +4566,13 @@ define i32 @test272(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test272: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB272_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB272_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB272_1 ; PPC64LE-NEXT: .LBB272_3: @@ -4584,13 +4584,13 @@ define i32 @test273(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test273: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB273_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB273_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB273_1 ; PPC64LE-NEXT: .LBB273_3: @@ -4603,13 +4603,13 @@ define i32 @test274(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test274: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB274_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB274_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB274_1 ; PPC64LE-NEXT: .LBB274_3: @@ -4622,12 +4622,12 @@ define i64 @test275(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test275: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB275_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB275_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB275_1 ; PPC64LE-NEXT: .LBB275_3: @@ -4639,13 +4639,13 @@ define i64 @test276(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test276: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB276_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: ble 0, .LBB276_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB276_1 ; PPC64LE-NEXT: .LBB276_3: @@ -4657,13 +4657,13 @@ define i64 @test277(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test277: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB277_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB277_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB277_1 ; PPC64LE-NEXT: .LBB277_3: @@ -4675,13 +4675,13 @@ define i64 @test278(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test278: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB278_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB278_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB278_1 ; PPC64LE-NEXT: .LBB278_3: @@ -4694,13 +4694,13 @@ define i64 @test279(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test279: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB279_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB279_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB279_1 ; PPC64LE-NEXT: .LBB279_3: @@ -4713,13 +4713,13 @@ define i8 @test280(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test280: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB280_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB280_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB280_1 ; PPC64LE-NEXT: .LBB280_3: @@ -4731,14 +4731,14 @@ define i8 @test281(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test281: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB281_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB281_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB281_1 ; PPC64LE-NEXT: .LBB281_3: @@ -4750,14 +4750,14 @@ define i8 @test282(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test282: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB282_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB282_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB282_1 ; PPC64LE-NEXT: .LBB282_3: @@ -4769,14 +4769,14 @@ define i8 @test283(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test283: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB283_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB283_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB283_1 ; PPC64LE-NEXT: .LBB283_3: @@ -4789,14 +4789,14 @@ define i8 @test284(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test284: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB284_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB284_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB284_1 ; PPC64LE-NEXT: .LBB284_3: @@ -4809,13 +4809,13 @@ define i16 @test285(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test285: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB285_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB285_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB285_1 ; PPC64LE-NEXT: .LBB285_3: @@ -4827,14 +4827,14 @@ define i16 @test286(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test286: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB286_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB286_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB286_1 ; PPC64LE-NEXT: .LBB286_3: @@ -4846,14 +4846,14 @@ define i16 @test287(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test287: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB287_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB287_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB287_1 ; PPC64LE-NEXT: .LBB287_3: @@ -4865,14 +4865,14 @@ define i16 @test288(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test288: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB288_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB288_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB288_1 ; PPC64LE-NEXT: .LBB288_3: @@ -4885,14 +4885,14 @@ define i16 @test289(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test289: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB289_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB289_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB289_1 ; PPC64LE-NEXT: .LBB289_3: @@ -4905,12 +4905,12 @@ define i32 @test290(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test290: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB290_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB290_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB290_1 ; PPC64LE-NEXT: .LBB290_3: @@ -4922,13 +4922,13 @@ define i32 @test291(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test291: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB291_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB291_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB291_1 ; PPC64LE-NEXT: .LBB291_3: @@ -4940,13 +4940,13 @@ define i32 @test292(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test292: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB292_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB292_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB292_1 ; PPC64LE-NEXT: .LBB292_3: @@ -4958,13 +4958,13 @@ define i32 @test293(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test293: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB293_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB293_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB293_1 ; PPC64LE-NEXT: .LBB293_3: @@ -4977,13 +4977,13 @@ define i32 @test294(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test294: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB294_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB294_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB294_1 ; PPC64LE-NEXT: .LBB294_3: @@ -4996,12 +4996,12 @@ define i64 @test295(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test295: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB295_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB295_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB295_1 ; PPC64LE-NEXT: .LBB295_3: @@ -5013,13 +5013,13 @@ define i64 @test296(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test296: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB296_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: bge 0, .LBB296_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB296_1 ; PPC64LE-NEXT: .LBB296_3: @@ -5031,13 +5031,13 @@ define i64 @test297(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test297: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB297_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB297_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB297_1 ; PPC64LE-NEXT: .LBB297_3: @@ -5049,13 +5049,13 @@ define i64 @test298(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test298: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB298_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB298_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB298_1 ; PPC64LE-NEXT: .LBB298_3: @@ -5068,13 +5068,13 @@ define i64 @test299(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test299: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB299_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB299_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB299_1 ; PPC64LE-NEXT: .LBB299_3: @@ -5087,12 +5087,12 @@ define i8 @test300(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test300: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB300_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB300_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB300_1 ; PPC64LE-NEXT: .LBB300_3: @@ -5104,13 +5104,13 @@ define i8 @test301(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test301: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB301_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB301_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB301_1 ; PPC64LE-NEXT: .LBB301_3: @@ -5122,13 +5122,13 @@ define i8 @test302(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test302: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB302_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB302_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB302_1 ; PPC64LE-NEXT: .LBB302_3: @@ -5140,13 +5140,13 @@ define i8 @test303(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test303: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB303_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB303_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB303_1 ; PPC64LE-NEXT: .LBB303_3: @@ -5159,13 +5159,13 @@ define i8 @test304(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test304: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB304_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB304_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB304_1 ; PPC64LE-NEXT: .LBB304_3: @@ -5178,12 +5178,12 @@ define i16 @test305(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test305: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB305_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB305_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB305_1 ; PPC64LE-NEXT: .LBB305_3: @@ -5195,13 +5195,13 @@ define i16 @test306(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test306: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB306_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB306_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB306_1 ; PPC64LE-NEXT: .LBB306_3: @@ -5213,13 +5213,13 @@ define i16 @test307(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test307: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB307_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB307_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB307_1 ; PPC64LE-NEXT: .LBB307_3: @@ -5231,13 +5231,13 @@ define i16 @test308(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test308: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB308_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB308_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB308_1 ; PPC64LE-NEXT: .LBB308_3: @@ -5250,13 +5250,13 @@ define i16 @test309(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test309: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB309_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB309_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB309_1 ; PPC64LE-NEXT: .LBB309_3: @@ -5269,12 +5269,12 @@ define i32 @test310(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test310: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB310_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB310_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB310_1 ; PPC64LE-NEXT: .LBB310_3: @@ -5286,13 +5286,13 @@ define i32 @test311(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test311: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB311_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB311_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB311_1 ; PPC64LE-NEXT: .LBB311_3: @@ -5304,13 +5304,13 @@ define i32 @test312(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test312: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB312_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB312_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB312_1 ; PPC64LE-NEXT: .LBB312_3: @@ -5322,13 +5322,13 @@ define i32 @test313(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test313: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB313_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB313_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB313_1 ; PPC64LE-NEXT: .LBB313_3: @@ -5341,13 +5341,13 @@ define i32 @test314(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test314: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB314_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB314_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB314_1 ; PPC64LE-NEXT: .LBB314_3: @@ -5360,12 +5360,12 @@ define i64 @test315(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test315: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB315_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB315_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB315_1 ; PPC64LE-NEXT: .LBB315_3: @@ -5377,13 +5377,13 @@ define i64 @test316(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test316: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB316_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: ble 0, .LBB316_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB316_1 ; PPC64LE-NEXT: .LBB316_3: @@ -5395,13 +5395,13 @@ define i64 @test317(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test317: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB317_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB317_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB317_1 ; PPC64LE-NEXT: .LBB317_3: @@ -5413,13 +5413,13 @@ define i64 @test318(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test318: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB318_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB318_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB318_1 ; PPC64LE-NEXT: .LBB318_3: @@ -5432,13 +5432,13 @@ define i64 @test319(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test319: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB319_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB319_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB319_1 ; PPC64LE-NEXT: .LBB319_3: @@ -5451,12 +5451,12 @@ define i8 @test320(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test320: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB320_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB320_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB320_1 ; PPC64LE-NEXT: .LBB320_3: @@ -5468,13 +5468,13 @@ define i8 @test321(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test321: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB321_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB321_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB321_1 ; PPC64LE-NEXT: .LBB321_3: @@ -5486,13 +5486,13 @@ define i8 @test322(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test322: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB322_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB322_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB322_1 ; PPC64LE-NEXT: .LBB322_3: @@ -5504,13 +5504,13 @@ define i8 @test323(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test323: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB323_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB323_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB323_1 ; PPC64LE-NEXT: .LBB323_3: @@ -5523,13 +5523,13 @@ define i8 @test324(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test324: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB324_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB324_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB324_1 ; PPC64LE-NEXT: .LBB324_3: @@ -5542,12 +5542,12 @@ define i16 @test325(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test325: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB325_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB325_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB325_1 ; PPC64LE-NEXT: .LBB325_3: @@ -5559,13 +5559,13 @@ define i16 @test326(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test326: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB326_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB326_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB326_1 ; PPC64LE-NEXT: .LBB326_3: @@ -5577,13 +5577,13 @@ define i16 @test327(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test327: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB327_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB327_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB327_1 ; PPC64LE-NEXT: .LBB327_3: @@ -5595,13 +5595,13 @@ define i16 @test328(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test328: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB328_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB328_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB328_1 ; PPC64LE-NEXT: .LBB328_3: @@ -5614,13 +5614,13 @@ define i16 @test329(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test329: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB329_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB329_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB329_1 ; PPC64LE-NEXT: .LBB329_3: @@ -5633,12 +5633,12 @@ define i32 @test330(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test330: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB330_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB330_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB330_1 ; PPC64LE-NEXT: .LBB330_3: @@ -5650,13 +5650,13 @@ define i32 @test331(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test331: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB331_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB331_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB331_1 ; PPC64LE-NEXT: .LBB331_3: @@ -5668,13 +5668,13 @@ define i32 @test332(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test332: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB332_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB332_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB332_1 ; PPC64LE-NEXT: .LBB332_3: @@ -5686,13 +5686,13 @@ define i32 @test333(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test333: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB333_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB333_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB333_1 ; PPC64LE-NEXT: .LBB333_3: @@ -5705,13 +5705,13 @@ define i32 @test334(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test334: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB334_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB334_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB334_1 ; PPC64LE-NEXT: .LBB334_3: @@ -5724,12 +5724,12 @@ define i64 @test335(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test335: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB335_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB335_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB335_1 ; PPC64LE-NEXT: .LBB335_3: @@ -5741,13 +5741,13 @@ define i64 @test336(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test336: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB336_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: bge 0, .LBB336_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB336_1 ; PPC64LE-NEXT: .LBB336_3: @@ -5759,13 +5759,13 @@ define i64 @test337(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test337: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB337_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB337_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB337_1 ; PPC64LE-NEXT: .LBB337_3: @@ -5777,13 +5777,13 @@ define i64 @test338(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test338: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB338_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB338_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB338_1 ; PPC64LE-NEXT: .LBB338_3: @@ -5796,13 +5796,13 @@ define i64 @test339(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test339: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB339_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB339_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB339_1 ; PPC64LE-NEXT: .LBB339_3: @@ -5815,12 +5815,12 @@ define i8 @test340(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test340: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB340_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB340_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -5829,13 +5829,13 @@ define i8 @test341(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test341: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB341_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: stbcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB341_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -5844,13 +5844,13 @@ define i8 @test342(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test342: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB342_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB342_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") release @@ -5859,13 +5859,13 @@ define i8 @test343(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test343: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB343_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB343_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5875,13 +5875,13 @@ define i8 @test344(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test344: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB344_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB344_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5891,12 +5891,12 @@ define i16 @test345(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test345: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB345_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB345_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -5905,13 +5905,13 @@ define i16 @test346(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test346: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB346_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB346_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -5920,13 +5920,13 @@ define i16 @test347(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test347: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB347_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB347_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") release @@ -5935,13 +5935,13 @@ define i16 @test348(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test348: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB348_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB348_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5951,13 +5951,13 @@ define i16 @test349(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test349: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB349_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB349_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5967,12 +5967,12 @@ define i32 @test350(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test350: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB350_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB350_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -5981,13 +5981,13 @@ define i32 @test351(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test351: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB351_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB351_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -5996,13 +5996,13 @@ define i32 @test352(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test352: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB352_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB352_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") release @@ -6011,13 +6011,13 @@ define i32 @test353(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test353: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB353_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB353_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6027,13 +6027,13 @@ define i32 @test354(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test354: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB354_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB354_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6043,12 +6043,12 @@ define i64 @test355(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test355: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB355_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB355_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -6057,13 +6057,13 @@ define i64 @test356(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test356: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB356_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB356_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -6072,13 +6072,13 @@ define i64 @test357(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test357: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB357_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB357_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") release @@ -6087,13 +6087,13 @@ define i64 @test358(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test358: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB358_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB358_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6103,13 +6103,13 @@ define i64 @test359(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test359: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB359_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB359_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6119,13 +6119,13 @@ define i8 @test360(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test360: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB360_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB360_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -6134,14 +6134,14 @@ define i8 @test361(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test361: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB361_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB361_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -6150,14 +6150,14 @@ define i8 @test362(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test362: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB362_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB362_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") release @@ -6166,14 +6166,14 @@ define i8 @test363(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test363: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB363_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB363_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6183,14 +6183,14 @@ define i8 @test364(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test364: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB364_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB364_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6200,13 +6200,13 @@ define i16 @test365(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test365: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB365_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB365_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -6215,14 +6215,14 @@ define i16 @test366(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test366: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB366_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB366_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -6231,14 +6231,14 @@ define i16 @test367(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test367: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB367_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB367_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") release @@ -6247,14 +6247,14 @@ define i16 @test368(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test368: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB368_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB368_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6264,14 +6264,14 @@ define i16 @test369(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test369: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB369_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB369_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6281,13 +6281,13 @@ define i32 @test370(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test370: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB370_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB370_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -6296,14 +6296,14 @@ define i32 @test371(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test371: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB371_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB371_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -6312,14 +6312,14 @@ define i32 @test372(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test372: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB372_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB372_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") release @@ -6328,14 +6328,14 @@ define i32 @test373(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test373: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB373_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB373_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6345,14 +6345,14 @@ define i32 @test374(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test374: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB374_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB374_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6362,13 +6362,13 @@ define i64 @test375(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test375: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB375_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB375_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -6377,14 +6377,14 @@ define i64 @test376(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test376: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB376_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB376_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -6393,14 +6393,14 @@ define i64 @test377(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test377: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB377_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB377_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") release @@ -6409,14 +6409,14 @@ define i64 @test378(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test378: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB378_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB378_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6426,14 +6426,14 @@ define i64 @test379(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test379: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB379_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB379_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6443,13 +6443,13 @@ define i8 @test380(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test380: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB380_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB380_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -6458,14 +6458,14 @@ define i8 @test381(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test381: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB381_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB381_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -6474,14 +6474,14 @@ define i8 @test382(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test382: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB382_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB382_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") release @@ -6490,14 +6490,14 @@ define i8 @test383(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test383: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB383_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB383_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6507,14 +6507,14 @@ define i8 @test384(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test384: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB384_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB384_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6524,13 +6524,13 @@ define i16 @test385(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test385: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB385_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB385_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -6539,14 +6539,14 @@ define i16 @test386(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test386: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB386_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB386_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -6555,14 +6555,14 @@ define i16 @test387(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test387: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB387_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB387_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") release @@ -6571,14 +6571,14 @@ define i16 @test388(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test388: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB388_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB388_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6588,14 +6588,14 @@ define i16 @test389(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test389: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB389_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB389_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6605,13 +6605,13 @@ define i32 @test390(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test390: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB390_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB390_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -6620,14 +6620,14 @@ define i32 @test391(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test391: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB391_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB391_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -6636,14 +6636,14 @@ define i32 @test392(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test392: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB392_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB392_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") release @@ -6652,14 +6652,14 @@ define i32 @test393(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test393: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB393_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB393_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6669,14 +6669,14 @@ define i32 @test394(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test394: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB394_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB394_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6686,13 +6686,13 @@ define i64 @test395(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test395: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB395_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB395_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -6701,14 +6701,14 @@ define i64 @test396(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test396: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB396_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: sub 6, 3, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB396_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -6717,14 +6717,14 @@ define i64 @test397(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test397: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB397_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB397_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") release @@ -6733,14 +6733,14 @@ define i64 @test398(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test398: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB398_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB398_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6750,14 +6750,14 @@ define i64 @test399(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test399: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB399_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB399_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6767,13 +6767,13 @@ define i8 @test400(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test400: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB400_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB400_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -6782,14 +6782,14 @@ define i8 @test401(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test401: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB401_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stbcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB401_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -6798,14 +6798,14 @@ define i8 @test402(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test402: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB402_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB402_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") release @@ -6814,14 +6814,14 @@ define i8 @test403(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test403: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB403_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB403_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6831,14 +6831,14 @@ define i8 @test404(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test404: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB404_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB404_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6848,13 +6848,13 @@ define i16 @test405(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test405: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB405_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB405_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -6863,14 +6863,14 @@ define i16 @test406(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test406: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB406_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB406_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -6879,14 +6879,14 @@ define i16 @test407(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test407: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB407_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB407_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") release @@ -6895,14 +6895,14 @@ define i16 @test408(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test408: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB408_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB408_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6912,14 +6912,14 @@ define i16 @test409(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test409: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB409_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB409_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6929,13 +6929,13 @@ define i32 @test410(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test410: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB410_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB410_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -6944,14 +6944,14 @@ define i32 @test411(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test411: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB411_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB411_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -6960,14 +6960,14 @@ define i32 @test412(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test412: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB412_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB412_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") release @@ -6976,14 +6976,14 @@ define i32 @test413(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test413: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB413_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB413_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6993,14 +6993,14 @@ define i32 @test414(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test414: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB414_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB414_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7010,13 +7010,13 @@ define i64 @test415(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test415: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB415_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB415_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7025,14 +7025,14 @@ define i64 @test416(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test416: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB416_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stdcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB416_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -7041,14 +7041,14 @@ define i64 @test417(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test417: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB417_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB417_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") release @@ -7057,14 +7057,14 @@ define i64 @test418(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test418: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB418_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB418_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7074,14 +7074,14 @@ define i64 @test419(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test419: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB419_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB419_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7091,13 +7091,13 @@ define i8 @test420(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test420: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB420_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB420_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -7106,14 +7106,14 @@ define i8 @test421(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test421: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB421_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB421_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -7122,14 +7122,14 @@ define i8 @test422(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test422: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB422_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB422_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") release @@ -7138,14 +7138,14 @@ define i8 @test423(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test423: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB423_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB423_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7155,14 +7155,14 @@ define i8 @test424(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test424: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB424_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB424_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7172,13 +7172,13 @@ define i16 @test425(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test425: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB425_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB425_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -7187,14 +7187,14 @@ define i16 @test426(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test426: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB426_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB426_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -7203,14 +7203,14 @@ define i16 @test427(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test427: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB427_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB427_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") release @@ -7219,14 +7219,14 @@ define i16 @test428(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test428: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB428_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB428_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7236,14 +7236,14 @@ define i16 @test429(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test429: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB429_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB429_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7253,13 +7253,13 @@ define i32 @test430(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test430: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB430_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB430_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -7268,14 +7268,14 @@ define i32 @test431(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test431: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB431_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB431_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -7284,14 +7284,14 @@ define i32 @test432(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test432: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB432_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB432_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") release @@ -7300,14 +7300,14 @@ define i32 @test433(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test433: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB433_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB433_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7317,14 +7317,14 @@ define i32 @test434(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test434: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB434_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB434_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7334,13 +7334,13 @@ define i64 @test435(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test435: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB435_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB435_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7349,14 +7349,14 @@ define i64 @test436(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test436: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB436_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB436_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -7365,14 +7365,14 @@ define i64 @test437(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test437: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB437_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB437_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") release @@ -7381,14 +7381,14 @@ define i64 @test438(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test438: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB438_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB438_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7398,14 +7398,14 @@ define i64 @test439(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test439: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB439_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB439_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7415,13 +7415,13 @@ define i8 @test440(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test440: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB440_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB440_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -7430,14 +7430,14 @@ define i8 @test441(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test441: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB441_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB441_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -7446,14 +7446,14 @@ define i8 @test442(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test442: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB442_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB442_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") release @@ -7462,14 +7462,14 @@ define i8 @test443(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test443: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB443_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB443_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7479,14 +7479,14 @@ define i8 @test444(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test444: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB444_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB444_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7496,13 +7496,13 @@ define i16 @test445(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test445: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB445_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB445_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -7511,14 +7511,14 @@ define i16 @test446(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test446: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB446_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB446_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -7527,14 +7527,14 @@ define i16 @test447(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test447: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB447_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB447_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") release @@ -7543,14 +7543,14 @@ define i16 @test448(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test448: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB448_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB448_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7560,14 +7560,14 @@ define i16 @test449(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test449: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB449_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB449_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7577,13 +7577,13 @@ define i32 @test450(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test450: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB450_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB450_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -7592,14 +7592,14 @@ define i32 @test451(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test451: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB451_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB451_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -7608,14 +7608,14 @@ define i32 @test452(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test452: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB452_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB452_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") release @@ -7624,14 +7624,14 @@ define i32 @test453(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test453: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB453_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB453_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7641,14 +7641,14 @@ define i32 @test454(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test454: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB454_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB454_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7658,13 +7658,13 @@ define i64 @test455(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test455: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB455_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB455_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7673,14 +7673,14 @@ define i64 @test456(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test456: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB456_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB456_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -7689,14 +7689,14 @@ define i64 @test457(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test457: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB457_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB457_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") release @@ -7705,14 +7705,14 @@ define i64 @test458(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test458: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB458_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB458_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7722,14 +7722,14 @@ define i64 @test459(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test459: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB459_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB459_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7739,13 +7739,13 @@ define i8 @test460(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test460: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB460_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB460_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -7754,14 +7754,14 @@ define i8 @test461(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test461: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB461_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB461_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -7770,14 +7770,14 @@ define i8 @test462(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test462: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB462_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB462_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") release @@ -7786,14 +7786,14 @@ define i8 @test463(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test463: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB463_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB463_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7803,14 +7803,14 @@ define i8 @test464(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test464: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB464_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB464_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7820,13 +7820,13 @@ define i16 @test465(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test465: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB465_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB465_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -7835,14 +7835,14 @@ define i16 @test466(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test466: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB466_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB466_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -7851,14 +7851,14 @@ define i16 @test467(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test467: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB467_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB467_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") release @@ -7867,14 +7867,14 @@ define i16 @test468(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test468: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB468_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB468_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7884,14 +7884,14 @@ define i16 @test469(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test469: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB469_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB469_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7901,13 +7901,13 @@ define i32 @test470(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test470: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB470_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB470_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -7916,14 +7916,14 @@ define i32 @test471(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test471: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB471_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB471_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -7932,14 +7932,14 @@ define i32 @test472(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test472: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB472_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB472_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") release @@ -7948,14 +7948,14 @@ define i32 @test473(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test473: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB473_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB473_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7965,14 +7965,14 @@ define i32 @test474(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test474: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB474_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB474_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7982,13 +7982,13 @@ define i64 @test475(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test475: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB475_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB475_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7997,14 +7997,14 @@ define i64 @test476(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test476: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB476_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB476_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -8013,14 +8013,14 @@ define i64 @test477(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test477: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB477_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB477_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") release @@ -8029,14 +8029,14 @@ define i64 @test478(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test478: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB478_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB478_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -8046,14 +8046,14 @@ define i64 @test479(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test479: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB479_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB479_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -8063,13 +8063,13 @@ define i8 @test480(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test480: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB480_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB480_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB480_1 ; PPC64LE-NEXT: .LBB480_3: @@ -8081,14 +8081,14 @@ define i8 @test481(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test481: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB481_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB481_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB481_1 ; PPC64LE-NEXT: .LBB481_3: @@ -8100,14 +8100,14 @@ define i8 @test482(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test482: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB482_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB482_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB482_1 ; PPC64LE-NEXT: .LBB482_3: @@ -8119,14 +8119,14 @@ define i8 @test483(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test483: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB483_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB483_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB483_1 ; PPC64LE-NEXT: .LBB483_3: @@ -8139,14 +8139,14 @@ define i8 @test484(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test484: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB484_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB484_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB484_1 ; PPC64LE-NEXT: .LBB484_3: @@ -8159,13 +8159,13 @@ define i16 @test485(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test485: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB485_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB485_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB485_1 ; PPC64LE-NEXT: .LBB485_3: @@ -8177,14 +8177,14 @@ define i16 @test486(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test486: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB486_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB486_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB486_1 ; PPC64LE-NEXT: .LBB486_3: @@ -8196,14 +8196,14 @@ define i16 @test487(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test487: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB487_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB487_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB487_1 ; PPC64LE-NEXT: .LBB487_3: @@ -8215,14 +8215,14 @@ define i16 @test488(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test488: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB488_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB488_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB488_1 ; PPC64LE-NEXT: .LBB488_3: @@ -8235,14 +8235,14 @@ define i16 @test489(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test489: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB489_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB489_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB489_1 ; PPC64LE-NEXT: .LBB489_3: @@ -8255,12 +8255,12 @@ define i32 @test490(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test490: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB490_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB490_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB490_1 ; PPC64LE-NEXT: .LBB490_3: @@ -8272,13 +8272,13 @@ define i32 @test491(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test491: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB491_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB491_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB491_1 ; PPC64LE-NEXT: .LBB491_3: @@ -8290,13 +8290,13 @@ define i32 @test492(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test492: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB492_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB492_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB492_1 ; PPC64LE-NEXT: .LBB492_3: @@ -8308,13 +8308,13 @@ define i32 @test493(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test493: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB493_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB493_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB493_1 ; PPC64LE-NEXT: .LBB493_3: @@ -8327,13 +8327,13 @@ define i32 @test494(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test494: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB494_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB494_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB494_1 ; PPC64LE-NEXT: .LBB494_3: @@ -8346,12 +8346,12 @@ define i64 @test495(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test495: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB495_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB495_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB495_1 ; PPC64LE-NEXT: .LBB495_3: @@ -8363,13 +8363,13 @@ define i64 @test496(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test496: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB496_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: ble 0, .LBB496_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB496_1 ; PPC64LE-NEXT: .LBB496_3: @@ -8381,13 +8381,13 @@ define i64 @test497(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test497: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB497_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB497_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB497_1 ; PPC64LE-NEXT: .LBB497_3: @@ -8399,13 +8399,13 @@ define i64 @test498(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test498: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB498_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB498_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB498_1 ; PPC64LE-NEXT: .LBB498_3: @@ -8418,13 +8418,13 @@ define i64 @test499(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test499: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB499_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB499_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB499_1 ; PPC64LE-NEXT: .LBB499_3: @@ -8437,13 +8437,13 @@ define i8 @test500(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test500: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB500_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB500_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB500_1 ; PPC64LE-NEXT: .LBB500_3: @@ -8455,14 +8455,14 @@ define i8 @test501(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test501: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB501_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB501_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB501_1 ; PPC64LE-NEXT: .LBB501_3: @@ -8474,14 +8474,14 @@ define i8 @test502(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test502: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB502_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB502_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB502_1 ; PPC64LE-NEXT: .LBB502_3: @@ -8493,14 +8493,14 @@ define i8 @test503(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test503: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB503_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB503_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB503_1 ; PPC64LE-NEXT: .LBB503_3: @@ -8513,14 +8513,14 @@ define i8 @test504(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test504: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB504_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB504_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB504_1 ; PPC64LE-NEXT: .LBB504_3: @@ -8533,13 +8533,13 @@ define i16 @test505(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test505: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB505_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB505_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB505_1 ; PPC64LE-NEXT: .LBB505_3: @@ -8551,14 +8551,14 @@ define i16 @test506(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test506: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB506_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB506_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB506_1 ; PPC64LE-NEXT: .LBB506_3: @@ -8570,14 +8570,14 @@ define i16 @test507(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test507: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB507_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB507_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB507_1 ; PPC64LE-NEXT: .LBB507_3: @@ -8589,14 +8589,14 @@ define i16 @test508(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test508: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB508_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB508_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB508_1 ; PPC64LE-NEXT: .LBB508_3: @@ -8609,14 +8609,14 @@ define i16 @test509(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test509: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB509_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB509_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB509_1 ; PPC64LE-NEXT: .LBB509_3: @@ -8629,12 +8629,12 @@ define i32 @test510(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test510: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB510_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB510_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB510_1 ; PPC64LE-NEXT: .LBB510_3: @@ -8646,13 +8646,13 @@ define i32 @test511(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test511: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB511_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB511_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB511_1 ; PPC64LE-NEXT: .LBB511_3: @@ -8664,13 +8664,13 @@ define i32 @test512(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test512: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB512_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB512_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB512_1 ; PPC64LE-NEXT: .LBB512_3: @@ -8682,13 +8682,13 @@ define i32 @test513(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test513: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB513_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB513_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB513_1 ; PPC64LE-NEXT: .LBB513_3: @@ -8701,13 +8701,13 @@ define i32 @test514(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test514: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB514_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB514_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB514_1 ; PPC64LE-NEXT: .LBB514_3: @@ -8720,12 +8720,12 @@ define i64 @test515(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test515: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB515_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB515_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB515_1 ; PPC64LE-NEXT: .LBB515_3: @@ -8737,13 +8737,13 @@ define i64 @test516(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test516: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB516_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: bge 0, .LBB516_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB516_1 ; PPC64LE-NEXT: .LBB516_3: @@ -8755,13 +8755,13 @@ define i64 @test517(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test517: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB517_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB517_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB517_1 ; PPC64LE-NEXT: .LBB517_3: @@ -8773,13 +8773,13 @@ define i64 @test518(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test518: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB518_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB518_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB518_1 ; PPC64LE-NEXT: .LBB518_3: @@ -8792,13 +8792,13 @@ define i64 @test519(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test519: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB519_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB519_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB519_1 ; PPC64LE-NEXT: .LBB519_3: @@ -8811,12 +8811,12 @@ define i8 @test520(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test520: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB520_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB520_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB520_1 ; PPC64LE-NEXT: .LBB520_3: @@ -8828,13 +8828,13 @@ define i8 @test521(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test521: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB521_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB521_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB521_1 ; PPC64LE-NEXT: .LBB521_3: @@ -8846,13 +8846,13 @@ define i8 @test522(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test522: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB522_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB522_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB522_1 ; PPC64LE-NEXT: .LBB522_3: @@ -8864,13 +8864,13 @@ define i8 @test523(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test523: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB523_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB523_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB523_1 ; PPC64LE-NEXT: .LBB523_3: @@ -8883,13 +8883,13 @@ define i8 @test524(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test524: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB524_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB524_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB524_1 ; PPC64LE-NEXT: .LBB524_3: @@ -8902,12 +8902,12 @@ define i16 @test525(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test525: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB525_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB525_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB525_1 ; PPC64LE-NEXT: .LBB525_3: @@ -8919,13 +8919,13 @@ define i16 @test526(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test526: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB526_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB526_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB526_1 ; PPC64LE-NEXT: .LBB526_3: @@ -8937,13 +8937,13 @@ define i16 @test527(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test527: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB527_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB527_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB527_1 ; PPC64LE-NEXT: .LBB527_3: @@ -8955,13 +8955,13 @@ define i16 @test528(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test528: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB528_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB528_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB528_1 ; PPC64LE-NEXT: .LBB528_3: @@ -8974,13 +8974,13 @@ define i16 @test529(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test529: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB529_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB529_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB529_1 ; PPC64LE-NEXT: .LBB529_3: @@ -8993,12 +8993,12 @@ define i32 @test530(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test530: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB530_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB530_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB530_1 ; PPC64LE-NEXT: .LBB530_3: @@ -9010,13 +9010,13 @@ define i32 @test531(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test531: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB531_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB531_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB531_1 ; PPC64LE-NEXT: .LBB531_3: @@ -9028,13 +9028,13 @@ define i32 @test532(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test532: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB532_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB532_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB532_1 ; PPC64LE-NEXT: .LBB532_3: @@ -9046,13 +9046,13 @@ define i32 @test533(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test533: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB533_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB533_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB533_1 ; PPC64LE-NEXT: .LBB533_3: @@ -9065,13 +9065,13 @@ define i32 @test534(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test534: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB534_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB534_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB534_1 ; PPC64LE-NEXT: .LBB534_3: @@ -9084,12 +9084,12 @@ define i64 @test535(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test535: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB535_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB535_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB535_1 ; PPC64LE-NEXT: .LBB535_3: @@ -9101,13 +9101,13 @@ define i64 @test536(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test536: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB536_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: ble 0, .LBB536_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB536_1 ; PPC64LE-NEXT: .LBB536_3: @@ -9119,13 +9119,13 @@ define i64 @test537(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test537: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB537_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB537_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB537_1 ; PPC64LE-NEXT: .LBB537_3: @@ -9137,13 +9137,13 @@ define i64 @test538(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test538: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB538_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB538_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB538_1 ; PPC64LE-NEXT: .LBB538_3: @@ -9156,13 +9156,13 @@ define i64 @test539(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test539: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB539_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB539_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB539_1 ; PPC64LE-NEXT: .LBB539_3: @@ -9175,12 +9175,12 @@ define i8 @test540(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test540: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB540_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB540_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB540_1 ; PPC64LE-NEXT: .LBB540_3: @@ -9192,13 +9192,13 @@ define i8 @test541(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test541: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB541_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB541_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB541_1 ; PPC64LE-NEXT: .LBB541_3: @@ -9210,13 +9210,13 @@ define i8 @test542(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test542: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB542_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB542_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB542_1 ; PPC64LE-NEXT: .LBB542_3: @@ -9228,13 +9228,13 @@ define i8 @test543(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test543: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB543_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB543_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB543_1 ; PPC64LE-NEXT: .LBB543_3: @@ -9247,13 +9247,13 @@ define i8 @test544(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test544: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB544_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB544_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB544_1 ; PPC64LE-NEXT: .LBB544_3: @@ -9266,12 +9266,12 @@ define i16 @test545(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test545: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB545_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB545_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB545_1 ; PPC64LE-NEXT: .LBB545_3: @@ -9283,13 +9283,13 @@ define i16 @test546(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test546: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB546_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB546_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB546_1 ; PPC64LE-NEXT: .LBB546_3: @@ -9301,13 +9301,13 @@ define i16 @test547(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test547: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB547_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB547_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB547_1 ; PPC64LE-NEXT: .LBB547_3: @@ -9319,13 +9319,13 @@ define i16 @test548(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test548: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB548_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB548_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB548_1 ; PPC64LE-NEXT: .LBB548_3: @@ -9338,13 +9338,13 @@ define i16 @test549(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test549: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB549_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB549_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB549_1 ; PPC64LE-NEXT: .LBB549_3: @@ -9357,12 +9357,12 @@ define i32 @test550(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test550: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB550_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB550_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB550_1 ; PPC64LE-NEXT: .LBB550_3: @@ -9374,13 +9374,13 @@ define i32 @test551(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test551: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB551_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB551_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB551_1 ; PPC64LE-NEXT: .LBB551_3: @@ -9392,13 +9392,13 @@ define i32 @test552(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test552: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB552_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB552_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB552_1 ; PPC64LE-NEXT: .LBB552_3: @@ -9410,13 +9410,13 @@ define i32 @test553(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test553: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB553_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB553_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB553_1 ; PPC64LE-NEXT: .LBB553_3: @@ -9429,13 +9429,13 @@ define i32 @test554(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test554: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB554_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB554_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB554_1 ; PPC64LE-NEXT: .LBB554_3: @@ -9448,12 +9448,12 @@ define i64 @test555(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test555: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB555_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB555_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB555_1 ; PPC64LE-NEXT: .LBB555_3: @@ -9465,13 +9465,13 @@ define i64 @test556(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test556: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB556_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: bge 0, .LBB556_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB556_1 ; PPC64LE-NEXT: .LBB556_3: @@ -9483,13 +9483,13 @@ define i64 @test557(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test557: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB557_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB557_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB557_1 ; PPC64LE-NEXT: .LBB557_3: @@ -9501,13 +9501,13 @@ define i64 @test558(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test558: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB558_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB558_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB558_1 ; PPC64LE-NEXT: .LBB558_3: @@ -9520,13 +9520,13 @@ define i64 @test559(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test559: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB559_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB559_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB559_1 ; PPC64LE-NEXT: .LBB559_3: @@ -9540,7 +9540,7 @@ ; The second load should never be scheduled before isync. define i32 @test_ordering0(i32* %ptr1, i32* %ptr2) { ; PPC64LE-LABEL: test_ordering0: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 4, 0(3) ; PPC64LE-NEXT: cmpd 7, 4, 4 ; PPC64LE-NEXT: bne- 7, .+4 @@ -9557,7 +9557,7 @@ ; The second store should never be scheduled before isync. 
define i32 @test_ordering1(i32* %ptr1, i32 %val1, i32* %ptr2) { ; PPC64LE-LABEL: test_ordering1: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 Index: llvm/trunk/test/CodeGen/PowerPC/branch_coalesce.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/branch_coalesce.ll +++ llvm/trunk/test/CodeGen/PowerPC/branch_coalesce.ll @@ -23,10 +23,10 @@ ; CHECK: blr ; CHECK-NOCOALESCE-LABEL: testBranchCoal: -; CHECK-NOCOALESCE: # BB#0: # %entry +; CHECK-NOCOALESCE: # %bb.0: # %entry ; CHECK-NOCOALESCE-NEXT: cmplwi 0, 6, 0 ; CHECK-NOCOALESCE-NEXT: bne 0, .LBB0_5 -; CHECK-NOCOALESCE-NEXT: # BB#1: # %entry +; CHECK-NOCOALESCE-NEXT: # %bb.1: # %entry ; CHECK-NOCOALESCE-NEXT: bne 0, .LBB0_6 ; CHECK-NOCOALESCE-NEXT: .LBB0_2: # %entry ; CHECK-NOCOALESCE-NEXT: beq 0, .LBB0_4 Index: llvm/trunk/test/CodeGen/PowerPC/fabs.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/fabs.ll +++ llvm/trunk/test/CodeGen/PowerPC/fabs.ll @@ -2,7 +2,7 @@ define double @fabs(double %f) { ; CHECK-LABEL: fabs: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: fabs f1, f1 ; CHECK-NEXT: blr ; @@ -12,7 +12,7 @@ define float @bitcast_fabs(float %x) { ; CHECK-LABEL: bitcast_fabs: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: stfs f1, -8(r1) ; CHECK-NEXT: nop ; CHECK-NEXT: nop Index: llvm/trunk/test/CodeGen/PowerPC/fma-aggr-FMF.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/fma-aggr-FMF.ll +++ llvm/trunk/test/CodeGen/PowerPC/fma-aggr-FMF.ll @@ -3,7 +3,7 @@ define float @can_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) { ; CHECK-LABEL: can_fma_with_fewer_uses: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xsmulsp 0, 1, 2 ; CHECK-NEXT: fmr 1, 0 ; CHECK-NEXT: xsmaddasp 1, 3, 4 @@ -21,7 +21,7 @@ ; around beside the fma. 
define float @no_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) { ; CHECK-LABEL: no_fma_with_fewer_uses: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xsmulsp 0, 3, 4 ; CHECK-NEXT: xsmulsp 13, 1, 2 ; CHECK-NEXT: xsmaddasp 0, 1, 2 Index: llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll +++ llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll @@ -4,7 +4,7 @@ define i1 @Test(double %a) { ; CHECK-LABEL: Test: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xscvdpsxws 1, 1 ; CHECK-NEXT: mfvsrwz 3, 1 ; CHECK-NEXT: xori 3, 3, 65534 Index: llvm/trunk/test/CodeGen/PowerPC/hello-reloc.s =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/hello-reloc.s +++ llvm/trunk/test/CodeGen/PowerPC/hello-reloc.s @@ -11,7 +11,7 @@ .globl _main .align 4 _main: ; @main -; BB#0: ; %entry +; %bb.0: ; %entry mflr r0 stw r31, -4(r1) stw r0, 8(r1) Index: llvm/trunk/test/CodeGen/PowerPC/licm-remat.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/licm-remat.ll +++ llvm/trunk/test/CodeGen/PowerPC/licm-remat.ll @@ -18,7 +18,7 @@ define linkonce_odr void @ZN6snappyDecompressor_(%"class.snappy::SnappyDecompressor"* %this, %"class.snappy::SnappyIOVecWriter"* %writer) { ; CHECK-LABEL: ZN6snappyDecompressor_: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: addis 3, 2, _ZN6snappy8internalL8wordmaskE@toc@ha ; CHECK-DAG: addi 25, 3, _ZN6snappy8internalL8wordmaskE@toc@l ; CHECK-DAG: addis 4, 2, _ZN6snappy8internalL10char_tableE@toc@ha Index: llvm/trunk/test/CodeGen/PowerPC/licm-tocReg.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/licm-tocReg.ll +++ llvm/trunk/test/CodeGen/PowerPC/licm-tocReg.ll @@ -64,7 +64,7 @@ define signext i32 @test(i32 (i32)* nocapture %FP) local_unnamed_addr #0 { ; CHECK-LABEL: test: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis 6, 2, .LC0@toc@ha ; CHECK-NEXT: addis 4, 2, .LC1@toc@ha ; CHECK-NEXT: ld 5, .LC1@toc@l(4) Index: llvm/trunk/test/CodeGen/PowerPC/logic-ops-on-compares.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/logic-ops-on-compares.ll +++ llvm/trunk/test/CodeGen/PowerPC/logic-ops-on-compares.ll @@ -43,11 +43,11 @@ define void @neg_truncate_i32_eq(i32 *%ptr) { ; CHECK-LABEL: neg_truncate_i32_eq: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz r3, 0(r3) ; CHECK-NEXT: rldicl. r3, r3, 0, 63 ; CHECK-NEXT: bclr 12, eq, 0 -; CHECK-NEXT: # BB#1: # %if.end29.thread136 +; CHECK-NEXT: # %bb.1: # %if.end29.thread136 entry: %0 = load i32, i32* %ptr, align 4 %rem17127 = and i32 %0, 1 @@ -101,11 +101,11 @@ define void @neg_truncate_i64_eq(i64 *%ptr) { ; CHECK-LABEL: neg_truncate_i64_eq: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: ld r3, 0(r3) ; CHECK-NEXT: rldicl. r3, r3, 0, 63 ; CHECK-NEXT: bclr 12, eq, 0 -; CHECK-NEXT: # BB#1: # %if.end29.thread136 +; CHECK-NEXT: # %bb.1: # %if.end29.thread136 entry: %0 = load i64, i64* %ptr, align 4 %rem17127 = and i64 %0, 1 @@ -161,11 +161,11 @@ define void @neg_truncate_i64_ne(i64 *%ptr) { ; CHECK-LABEL: neg_truncate_i64_ne: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: ld r3, 0(r3) ; CHECK-NEXT: andi. 
r3, r3, 1 ; CHECK-NEXT: bclr 12, gt, 0 -; CHECK-NEXT: # BB#1: # %if.end29.thread136 +; CHECK-NEXT: # %bb.1: # %if.end29.thread136 entry: %0 = load i64, i64* %ptr, align 4 %rem17127 = and i64 %0, 1 Index: llvm/trunk/test/CodeGen/PowerPC/machine-combiner.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/machine-combiner.ll +++ llvm/trunk/test/CodeGen/PowerPC/machine-combiner.ll @@ -8,7 +8,7 @@ define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -22,7 +22,7 @@ define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds2: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -36,7 +36,7 @@ define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds3: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -50,7 +50,7 @@ define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds4: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -67,7 +67,7 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) { ; CHECK-LABEL: reassociate_adds5: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG12:[0-9]+]], 5, 6 ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG11:[0-9]+]], 3, 4 @@ -91,7 +91,7 @@ define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] @@ -108,7 +108,7 @@ define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds2: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] @@ -125,7 +125,7 @@ define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds3: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] @@ -142,7 +142,7 @@ define <4 x float> @vector_reassociate_adds4(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds4: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] Index: llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll +++ llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll @@ -17,7 +17,7 @@ ; Check 
4 bytes - requires 1 load for each param. define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) { ; CHECK-LABEL: zeroEqualityTest02: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: xor 3, 3, 4 @@ -34,12 +34,12 @@ ; Check 16 bytes - requires 2 loads for each param (or use vectors?). define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) { ; CHECK-LABEL: zeroEqualityTest01: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: ld 5, 0(3) ; CHECK-NEXT: ld 6, 0(4) ; CHECK-NEXT: cmpld 5, 6 ; CHECK-NEXT: bne 0, .LBB1_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: ld 3, 8(3) ; CHECK-NEXT: ld 4, 8(4) ; CHECK-NEXT: cmpld 3, 4 @@ -59,17 +59,17 @@ ; Check 7 bytes - requires 3 loads for each param. define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) { ; CHECK-LABEL: zeroEqualityTest03: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lwz 5, 0(3) ; CHECK-NEXT: lwz 6, 0(4) ; CHECK-NEXT: cmplw 5, 6 ; CHECK-NEXT: bne 0, .LBB2_3 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: lhz 5, 4(3) ; CHECK-NEXT: lhz 6, 4(4) ; CHECK-NEXT: cmplw 5, 6 ; CHECK-NEXT: bne 0, .LBB2_3 -; CHECK-NEXT: # BB#2: # %loadbb2 +; CHECK-NEXT: # %bb.2: # %loadbb2 ; CHECK-NEXT: lbz 3, 6(3) ; CHECK-NEXT: lbz 4, 6(4) ; CHECK-NEXT: cmplw 3, 4 @@ -89,7 +89,7 @@ ; Validate with > 0 define signext i32 @zeroEqualityTest04() { ; CHECK-LABEL: zeroEqualityTest04: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest02.buffer1@toc@ha ; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest02.buffer2@toc@ha ; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest02.buffer1@toc@l @@ -98,7 +98,7 @@ ; CHECK-NEXT: ldbrx 4, 0, 5 ; CHECK-NEXT: cmpld 3, 4 ; CHECK-NEXT: bne 0, .LBB3_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: li 4, 8 ; CHECK-NEXT: ldbrx 3, 6, 4 ; CHECK-NEXT: ldbrx 4, 5, 4 @@ -125,7 +125,7 @@ ; Validate with < 0 define signext i32 @zeroEqualityTest05() { ; CHECK-LABEL: zeroEqualityTest05: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest03.buffer1@toc@ha ; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest03.buffer2@toc@ha ; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest03.buffer1@toc@l @@ -134,7 +134,7 @@ ; CHECK-NEXT: ldbrx 4, 0, 5 ; CHECK-NEXT: cmpld 3, 4 ; CHECK-NEXT: bne 0, .LBB4_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: li 4, 8 ; CHECK-NEXT: ldbrx 3, 6, 4 ; CHECK-NEXT: ldbrx 4, 5, 4 @@ -160,7 +160,7 @@ ; Validate with memcmp()?: define signext i32 @equalityFoldTwoConstants() { ; CHECK-LABEL: equalityFoldTwoConstants: -; CHECK: # BB#0: # %endblock +; CHECK: # %bb.0: # %endblock ; CHECK-NEXT: li 3, 1 ; CHECK-NEXT: blr %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer2 to i8*), i64 16) @@ -171,13 +171,13 @@ define signext i32 @equalityFoldOneConstant(i8* %X) { ; CHECK-LABEL: equalityFoldOneConstant: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 5, 1 ; CHECK-NEXT: ld 4, 0(3) ; CHECK-NEXT: sldi 5, 5, 32 ; CHECK-NEXT: cmpld 4, 5 ; CHECK-NEXT: bne 0, .LBB6_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: li 4, 3 ; CHECK-NEXT: ld 3, 8(3) ; CHECK-NEXT: sldi 4, 4, 32 @@ -199,7 +199,7 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) { ; CHECK-LABEL: length2_eq_nobuiltin_attr: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: mflr 0 ; CHECK-NEXT: std 0, 16(1) ; 
CHECK-NEXT: stdu 1, -32(1) Index: llvm/trunk/test/CodeGen/PowerPC/memcmp.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/memcmp.ll +++ llvm/trunk/test/CodeGen/PowerPC/memcmp.ll @@ -3,7 +3,7 @@ define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp8: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: ldbrx 3, 0, 3 ; CHECK-NEXT: ldbrx 4, 0, 4 ; CHECK-NEXT: subfc 5, 3, 4 @@ -23,7 +23,7 @@ define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp4: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lwbrx 3, 0, 3 ; CHECK-NEXT: lwbrx 4, 0, 4 ; CHECK-NEXT: sub 5, 4, 3 @@ -41,7 +41,7 @@ define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp2: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lhbrx 3, 0, 3 ; CHECK-NEXT: lhbrx 4, 0, 4 ; CHECK-NEXT: subf 3, 4, 3 @@ -55,7 +55,7 @@ define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lbz 3, 0(3) ; CHECK-NEXT: lbz 4, 0(4) ; CHECK-NEXT: subf 3, 4, 3 Index: llvm/trunk/test/CodeGen/PowerPC/negate-i1.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/negate-i1.ll +++ llvm/trunk/test/CodeGen/PowerPC/negate-i1.ll @@ -4,7 +4,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) { ; CHECK-LABEL: select_i32_neg1_or_0: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: clrldi 3, 3, 63 ; CHECK-NEXT: neg 3, 3 ; CHECK-NEXT: blr @@ -15,7 +15,7 @@ define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) { ; CHECK-LABEL: select_i32_neg1_or_0_zeroext: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: neg 3, 3 ; CHECK-NEXT: blr ; Index: llvm/trunk/test/CodeGen/PowerPC/ppc32-nest.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/ppc32-nest.ll +++ llvm/trunk/test/CodeGen/PowerPC/ppc32-nest.ll @@ -7,7 +7,7 @@ define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: mr 3, 11 ; CHECK-NEXT: blr Index: llvm/trunk/test/CodeGen/PowerPC/ppc64-nest.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/ppc64-nest.ll +++ llvm/trunk/test/CodeGen/PowerPC/ppc64-nest.ll @@ -7,7 +7,7 @@ define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: mr 3, 11 ; CHECK-NEXT: blr Index: llvm/trunk/test/CodeGen/PowerPC/pr32140.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/pr32140.ll +++ llvm/trunk/test/CodeGen/PowerPC/pr32140.ll @@ -9,7 +9,7 @@ define void @bswapStorei64Toi32() { ; CHECK-LABEL: bswapStorei64Toi32: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lwa 3, 0(3) ; CHECK-NEXT: rldicl 3, 3, 32, 32 ; CHECK-NEXT: stwbrx 3, 0, 4 @@ -25,7 +25,7 @@ define void @bswapStorei32Toi16() { ; CHECK-LABEL: bswapStorei32Toi16: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lha 3, 0(3) ; CHECK-NEXT: srwi 3, 3, 16 ; CHECK-NEXT: sthbrx 3, 0, 4 @@ -41,7 +41,7 @@ define void @bswapStorei64Toi16() { ; CHECK-LABEL: bswapStorei64Toi16: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lha 3, 0(3) ; CHECK-NEXT: rldicl 3, 3, 16, 48 ; 
CHECK-NEXT: sthbrx 3, 0, 4 Index: llvm/trunk/test/CodeGen/PowerPC/pr33093.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/pr33093.ll +++ llvm/trunk/test/CodeGen/PowerPC/pr33093.ll @@ -4,7 +4,7 @@ define zeroext i32 @ReverseBits(i32 zeroext %n) { ; CHECK-LABEL: ReverseBits: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: slwi 6, 3, 1 @@ -68,7 +68,7 @@ define i64 @ReverseBits64(i64 %n) { ; CHECK-LABEL: ReverseBits64: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: lis 6, -13108 Index: llvm/trunk/test/CodeGen/PowerPC/select-addrRegRegOnly.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/select-addrRegRegOnly.ll +++ llvm/trunk/test/CodeGen/PowerPC/select-addrRegRegOnly.ll @@ -4,7 +4,7 @@ ; Function Attrs: norecurse nounwind readonly define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 { ; CHECK-LABEL: testSingleAccess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi 3, 3, 8 ; CHECK-NEXT: lfiwax 0, 0, 3 ; CHECK-NEXT: xscvsxdsp 1, 0 @@ -19,7 +19,7 @@ ; Function Attrs: norecurse nounwind readonly define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 { ; CHECK-LABEL: testMultipleAccess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 4, 8(3) ; CHECK-NEXT: lwz 12, 12(3) ; CHECK-NEXT: add 3, 12, 4 Index: llvm/trunk/test/CodeGen/PowerPC/select_const.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/select_const.ll +++ llvm/trunk/test/CodeGen/PowerPC/select_const.ll @@ -9,7 +9,7 @@ define i32 @select_0_or_1(i1 %cond) { ; ALL-LABEL: select_0_or_1: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: not 3, 3 ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr @@ -19,7 +19,7 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_0_or_1_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: xori 3, 3, 1 ; ALL-NEXT: blr %sel = select i1 %cond, i32 0, i32 1 @@ -28,7 +28,7 @@ define i32 @select_0_or_1_signext(i1 signext %cond) { ; ALL-LABEL: select_0_or_1_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: not 3, 3 ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr @@ -40,7 +40,7 @@ define i32 @select_1_or_0(i1 %cond) { ; ALL-LABEL: select_1_or_0: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr %sel = select i1 %cond, i32 1, i32 0 @@ -49,7 +49,7 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_1_or_0_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: blr %sel = select i1 %cond, i32 1, i32 0 ret i32 %sel @@ -57,7 +57,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) { ; ALL-LABEL: select_1_or_0_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr %sel = select i1 %cond, i32 1, i32 0 @@ -68,7 +68,7 @@ define i32 @select_0_or_neg1(i1 %cond) { ; ISEL-LABEL: select_0_or_neg1: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -77,7 +77,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_0_or_neg1: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 
3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 @@ -93,7 +93,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { ; ISEL-LABEL: select_0_or_neg1_zeroext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -102,7 +102,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_0_or_neg1_zeroext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 @@ -118,7 +118,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) { ; ISEL-LABEL: select_0_or_neg1_signext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -127,7 +127,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_0_or_neg1_signext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 @@ -145,7 +145,7 @@ define i32 @select_neg1_or_0(i1 %cond) { ; ISEL-LABEL: select_neg1_or_0: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -154,13 +154,13 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_neg1_or_0: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: ori 3, 3, 65535 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i32 -1, i32 0 @@ -169,7 +169,7 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) { ; ISEL-LABEL: select_neg1_or_0_zeroext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -178,13 +178,13 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_neg1_or_0_zeroext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: ori 3, 3, 65535 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i32 -1, i32 0 @@ -193,7 +193,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) { ; ISEL-LABEL: select_neg1_or_0_signext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -202,13 +202,13 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_neg1_or_0_signext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 
3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: ori 3, 3, 65535 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i32 -1, i32 0 @@ -219,7 +219,7 @@ define i32 @select_Cplus1_C(i1 %cond) { ; ALL-LABEL: select_Cplus1_C: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: addi 3, 3, 41 ; ALL-NEXT: blr @@ -229,7 +229,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_Cplus1_C_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: addi 3, 3, 41 ; ALL-NEXT: blr %sel = select i1 %cond, i32 42, i32 41 @@ -238,7 +238,7 @@ define i32 @select_Cplus1_C_signext(i1 signext %cond) { ; ALL-LABEL: select_Cplus1_C_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: subfic 3, 3, 41 ; ALL-NEXT: blr %sel = select i1 %cond, i32 42, i32 41 @@ -249,7 +249,7 @@ define i32 @select_C_Cplus1(i1 %cond) { ; ALL-LABEL: select_C_Cplus1: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: subfic 3, 3, 42 ; ALL-NEXT: blr @@ -259,7 +259,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_C_Cplus1_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: subfic 3, 3, 42 ; ALL-NEXT: blr %sel = select i1 %cond, i32 41, i32 42 @@ -268,7 +268,7 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) { ; ALL-LABEL: select_C_Cplus1_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: addi 3, 3, 42 ; ALL-NEXT: blr %sel = select i1 %cond, i32 41, i32 42 @@ -280,7 +280,7 @@ define i32 @select_C1_C2(i1 %cond) { ; ISEL-LABEL: select_C1_C2: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 421 ; ISEL-NEXT: li 3, 42 @@ -288,7 +288,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_C1_C2: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 421 ; NO_ISEL-NEXT: li 3, 42 @@ -303,7 +303,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { ; ISEL-LABEL: select_C1_C2_zeroext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 421 ; ISEL-NEXT: li 3, 42 @@ -311,7 +311,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_C1_C2_zeroext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 421 ; NO_ISEL-NEXT: li 3, 42 @@ -326,7 +326,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) { ; ISEL-LABEL: select_C1_C2_signext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 421 ; ISEL-NEXT: li 3, 42 @@ -334,7 +334,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_C1_C2_signext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 421 ; NO_ISEL-NEXT: li 3, 42 @@ -351,7 +351,7 @@ define i8 @sel_constants_add_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_add_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 1 ; ISEL-NEXT: li 3, 28 @@ -359,7 +359,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_add_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 1 ; NO_ISEL-NEXT: li 3, 28 @@ -375,7 +375,7 @@ define i8 @sel_constants_sub_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_sub_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 
3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -385,14 +385,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_sub_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 18 ; NO_ISEL-NEXT: ori 3, 3, 65527 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -402,7 +402,7 @@ define i8 @sel_constants_mul_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_mul_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: lis 4, 16383 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: ori 3, 4, 65531 @@ -412,14 +412,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_mul_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: lis 4, 16383 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: ori 3, 4, 65531 ; NO_ISEL-NEXT: li 4, 115 ; NO_ISEL-NEXT: sldi 3, 3, 2 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -429,14 +429,14 @@ define i8 @sel_constants_sdiv_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_sdiv_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 3, 4 ; ISEL-NEXT: isel 3, 0, 3, 1 ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_sdiv_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 3, 4 ; NO_ISEL-NEXT: bc 12, 1, .LBB24_1 @@ -451,7 +451,7 @@ define i8 @sel_constants_udiv_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_udiv_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 50 ; ISEL-NEXT: li 3, 4 @@ -459,7 +459,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_udiv_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 50 ; NO_ISEL-NEXT: li 3, 4 @@ -475,7 +475,7 @@ define i8 @sel_constants_srem_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_srem_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: lis 4, 16383 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: ori 3, 4, 65535 @@ -485,14 +485,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_srem_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: lis 4, 16383 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: ori 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 3 ; NO_ISEL-NEXT: sldi 3, 3, 2 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -502,7 +502,7 @@ define i8 @sel_constants_urem_constant(i1 %cond) { ; ALL-LABEL: sel_constants_urem_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: rlwinm 3, 3, 0, 31, 31 ; ALL-NEXT: subfic 3, 3, 3 ; ALL-NEXT: blr @@ -513,7 +513,7 @@ define i8 @sel_constants_and_constant(i1 %cond) { ; ALL-LABEL: sel_constants_and_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: rlwinm 3, 3, 0, 31, 31 ; ALL-NEXT: subfic 3, 3, 5 ; ALL-NEXT: blr @@ -524,7 +524,7 @@ define i8 @sel_constants_or_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_or_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -534,14 +534,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_or_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 
3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 23 ; NO_ISEL-NEXT: ori 3, 3, 65533 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -551,7 +551,7 @@ define i8 @sel_constants_xor_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_xor_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -561,14 +561,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_xor_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 18 ; NO_ISEL-NEXT: ori 3, 3, 65529 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -578,7 +578,7 @@ define i8 @sel_constants_shl_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_shl_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: lis 5, 511 ; ISEL-NEXT: lis 4, 2047 ; ISEL-NEXT: andi. 3, 3, 1 @@ -590,7 +590,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_shl_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: lis 5, 511 ; NO_ISEL-NEXT: lis 4, 2047 ; NO_ISEL-NEXT: andi. 3, 3, 1 @@ -610,7 +610,7 @@ define i8 @sel_constants_lshr_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_lshr_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 7 ; ISEL-NEXT: li 3, 0 @@ -618,7 +618,7 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_lshr_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 7 ; NO_ISEL-NEXT: li 3, 0 @@ -634,7 +634,7 @@ define i8 @sel_constants_ashr_constant(i1 %cond) { ; ALL-LABEL: sel_constants_ashr_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: neg 3, 3 ; ALL-NEXT: blr @@ -645,7 +645,7 @@ define double @sel_constants_fadd_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fadd_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha @@ -656,14 +656,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fadd_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI34_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI34_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB34_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB34_2 ; NO_ISEL-NEXT: .LBB34_2: @@ -676,7 +676,7 @@ define double @sel_constants_fsub_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fsub_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha @@ -687,14 +687,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fsub_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 
3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI35_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI35_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB35_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB35_2 ; NO_ISEL-NEXT: .LBB35_2: @@ -707,7 +707,7 @@ define double @sel_constants_fmul_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fmul_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha @@ -718,14 +718,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fmul_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI36_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI36_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB36_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB36_2 ; NO_ISEL-NEXT: .LBB36_2: @@ -738,7 +738,7 @@ define double @sel_constants_fdiv_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fdiv_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha @@ -749,14 +749,14 @@ ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fdiv_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI37_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI37_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB37_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB37_2 ; NO_ISEL-NEXT: .LBB37_2: @@ -769,10 +769,10 @@ define double @sel_constants_frem_constant(i1 %cond) { ; ALL-LABEL: sel_constants_frem_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: andi. 
3, 3, 1 ; ALL-NEXT: bc 12, 1, .LBB38_2 -; ALL-NEXT: # BB#1: +; ALL-NEXT: # %bb.1: ; ALL-NEXT: addis 3, 2, .LCPI38_0@toc@ha ; ALL-NEXT: addi 3, 3, .LCPI38_0@toc@l ; ALL-NEXT: lxsdx 1, 0, 3 Index: llvm/trunk/test/CodeGen/PowerPC/setcc-logic.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/setcc-logic.ll +++ llvm/trunk/test/CodeGen/PowerPC/setcc-logic.ll @@ -3,7 +3,7 @@ define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: all_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: srwi 3, 3, 5 @@ -16,7 +16,7 @@ define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: nor 3, 3, 3 ; CHECK-NEXT: srwi 3, 3, 31 @@ -29,7 +29,7 @@ define zeroext i1 @all_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: all_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 5, -1 ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: xor 3, 3, 5 @@ -44,7 +44,7 @@ define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: srwi 3, 3, 31 ; CHECK-NEXT: blr @@ -56,7 +56,7 @@ define zeroext i1 @any_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: any_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: srwi 3, 3, 5 @@ -70,7 +70,7 @@ define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: srwi 3, 3, 31 ; CHECK-NEXT: blr @@ -82,7 +82,7 @@ define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: any_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 5, -1 ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: xor 3, 3, 5 @@ -98,7 +98,7 @@ define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: nor 3, 3, 3 ; CHECK-NEXT: srwi 3, 3, 31 @@ -112,10 +112,10 @@ ; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0 define i32 @all_bits_clear_branch(i32* %P, i32* %Q) { ; CHECK-LABEL: all_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or. 
3, 3, 4 ; CHECK-NEXT: bne 0, .LBB8_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB8_2: # %return @@ -136,11 +136,11 @@ define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, 0 ; CHECK-NEXT: blt 0, .LBB9_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB9_2: # %return @@ -161,11 +161,11 @@ define i32 @all_bits_set_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: all_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: bne 0, .LBB10_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB10_2: # %return @@ -186,11 +186,11 @@ define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: bgt 0, .LBB11_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB11_2: # %return @@ -212,10 +212,10 @@ ; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0 define i32 @any_bits_set_branch(i32* %P, i32* %Q) { ; CHECK-LABEL: any_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or. 3, 3, 4 ; CHECK-NEXT: beq 0, .LBB12_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB12_2: # %return @@ -236,11 +236,11 @@ define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: bgt 0, .LBB13_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB13_2: # %return @@ -261,11 +261,11 @@ define i32 @any_bits_clear_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: any_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: beq 0, .LBB14_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB14_2: # %return @@ -286,11 +286,11 @@ define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, 0 ; CHECK-NEXT: blt 0, .LBB15_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB15_2: # %return @@ -311,7 +311,7 @@ define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_bits_clear_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -324,7 +324,7 @@ define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_sign_bits_clear_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 2, 4 @@ -337,7 +337,7 @@ define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_bits_set_vec: -; CHECK: 
# BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -350,7 +350,7 @@ define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_sign_bits_set_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 4, 2 @@ -363,7 +363,7 @@ define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_bits_set_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -377,7 +377,7 @@ define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_sign_bits_set_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 4, 2 @@ -390,7 +390,7 @@ define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_bits_clear_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -404,7 +404,7 @@ define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_sign_bits_clear_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 2, 4 @@ -417,7 +417,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) { ; CHECK-LABEL: ne_neg1_and_ne_zero: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 4, 1 ; CHECK-NEXT: addi 3, 3, 1 ; CHECK-NEXT: subfc 3, 3, 4 @@ -434,7 +434,7 @@ define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 zeroext %d) { ; CHECK-LABEL: and_eq: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xor 5, 5, 6 ; CHECK-NEXT: xor 3, 3, 4 ; CHECK-NEXT: or 3, 3, 5 @@ -449,7 +449,7 @@ define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: or_ne: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xor 5, 5, 6 ; CHECK-NEXT: xor 3, 3, 4 ; CHECK-NEXT: or 3, 3, 5 @@ -467,7 +467,7 @@ define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) { ; CHECK-LABEL: and_eq_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: vcmpequw 19, 4, 5 ; CHECK-NEXT: xxland 34, 34, 51 Index: llvm/trunk/test/CodeGen/PowerPC/setcc-to-sub.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/setcc-to-sub.ll +++ llvm/trunk/test/CodeGen/PowerPC/setcc-to-sub.ll @@ -8,7 +8,7 @@ ; Function Attrs: norecurse nounwind readonly define zeroext i1 @test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test1: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 @@ -30,7 +30,7 @@ ; Function Attrs: norecurse nounwind readonly define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test2: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readonly define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test3: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 @@ -75,7 +75,7 @@ ; Function Attrs: 
norecurse nounwind readonly define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test4: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 Index: llvm/trunk/test/CodeGen/PowerPC/shift_mask.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/shift_mask.ll +++ llvm/trunk/test/CodeGen/PowerPC/shift_mask.ll @@ -4,7 +4,7 @@ define i8 @test000(i8 %a, i8 %b) { ; CHECK-LABEL: test000: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31 ; CHECK-NEXT: slw 3, 3, 4 ; CHECK-NEXT: blr @@ -15,7 +15,7 @@ define i16 @test001(i16 %a, i16 %b) { ; CHECK-LABEL: test001: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31 ; CHECK-NEXT: slw 3, 3, 4 ; CHECK-NEXT: blr @@ -26,7 +26,7 @@ define i32 @test002(i32 %a, i32 %b) { ; CHECK-LABEL: test002: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31 ; CHECK-NEXT: slw 3, 3, 4 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ define i64 @test003(i64 %a, i64 %b) { ; CHECK-LABEL: test003: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31 ; CHECK-NEXT: sld 3, 3, 4 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ define <16 x i8> @test010(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: test010: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vslb 2, 2, 3 ; CHECK-NEXT: blr %rem = and <16 x i8> %b, @@ -58,7 +58,7 @@ define <8 x i16> @test011(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: test011: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vslh 2, 2, 3 ; CHECK-NEXT: blr %rem = and <8 x i16> %b, @@ -68,7 +68,7 @@ define <4 x i32> @test012(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: test012: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vslw 2, 2, 3 ; CHECK-NEXT: blr %rem = and <4 x i32> %b, @@ -78,7 +78,7 @@ define <2 x i64> @test013(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: test013: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsld 2, 2, 3 ; CHECK-NEXT: blr %rem = and <2 x i64> %b, @@ -88,7 +88,7 @@ define i8 @test100(i8 %a, i8 %b) { ; CHECK-LABEL: test100: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 3, 3, 0, 24, 31 ; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31 ; CHECK-NEXT: srw 3, 3, 4 @@ -100,7 +100,7 @@ define i16 @test101(i16 %a, i16 %b) { ; CHECK-LABEL: test101: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 3, 3, 0, 16, 31 ; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31 ; CHECK-NEXT: srw 3, 3, 4 @@ -112,7 +112,7 @@ define i32 @test102(i32 %a, i32 %b) { ; CHECK-LABEL: test102: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31 ; CHECK-NEXT: srw 3, 3, 4 ; CHECK-NEXT: blr @@ -123,7 +123,7 @@ define i64 @test103(i64 %a, i64 %b) { ; CHECK-LABEL: test103: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31 ; CHECK-NEXT: srd 3, 3, 4 ; CHECK-NEXT: blr @@ -134,7 +134,7 @@ define <16 x i8> @test110(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: test110: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrb 2, 2, 3 ; CHECK-NEXT: blr %rem = and <16 x i8> %b, @@ -144,7 +144,7 @@ define <8 x i16> @test111(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: test111: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrh 2, 2, 3 ; CHECK-NEXT: blr %rem = and <8 x i16> %b, @@ -154,7 +154,7 @@ define <4 x i32> @test112(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: test112: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrw 2, 2, 3 ; CHECK-NEXT: blr %rem = and <4 x 
i32> %b, @@ -164,7 +164,7 @@ define <2 x i64> @test113(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: test113: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrd 2, 2, 3 ; CHECK-NEXT: blr %rem = and <2 x i64> %b, @@ -174,7 +174,7 @@ define i8 @test200(i8 %a, i8 %b) { ; CHECK-LABEL: test200: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: extsb 3, 3 ; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31 ; CHECK-NEXT: sraw 3, 3, 4 @@ -186,7 +186,7 @@ define i16 @test201(i16 %a, i16 %b) { ; CHECK-LABEL: test201: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: extsh 3, 3 ; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31 ; CHECK-NEXT: sraw 3, 3, 4 @@ -198,7 +198,7 @@ define i32 @test202(i32 %a, i32 %b) { ; CHECK-LABEL: test202: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31 ; CHECK-NEXT: sraw 3, 3, 4 ; CHECK-NEXT: blr @@ -209,7 +209,7 @@ define i64 @test203(i64 %a, i64 %b) { ; CHECK-LABEL: test203: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31 ; CHECK-NEXT: srad 3, 3, 4 ; CHECK-NEXT: blr @@ -220,7 +220,7 @@ define <16 x i8> @test210(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: test210: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrab 2, 2, 3 ; CHECK-NEXT: blr %rem = and <16 x i8> %b, @@ -230,7 +230,7 @@ define <8 x i16> @test211(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: test211: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrah 2, 2, 3 ; CHECK-NEXT: blr %rem = and <8 x i16> %b, @@ -240,7 +240,7 @@ define <4 x i32> @test212(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: test212: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsraw 2, 2, 3 ; CHECK-NEXT: blr %rem = and <4 x i32> %b, @@ -250,7 +250,7 @@ define <2 x i64> @test213(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: test213: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrad 2, 2, 3 ; CHECK-NEXT: blr %rem = and <2 x i64> %b, Index: llvm/trunk/test/CodeGen/PowerPC/sjlj.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/sjlj.ll +++ llvm/trunk/test/CodeGen/PowerPC/sjlj.ll @@ -77,7 +77,7 @@ ; CHECK: bcl 20, 31, .LBB1_3 ; CHECK: li 3, 1 ; CHECK: #EH_SjLj_Setup .LBB1_3 -; CHECK: # BB#1: +; CHECK: # %bb.1: ; CHECK: .LBB1_3: ; CHECK: mflr [[REGL:[0-9]+]] Index: llvm/trunk/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll +++ llvm/trunk/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll @@ -12,17 +12,17 @@ ; Function Attrs: nounwind ; CHECK-LABEL: tail_dup_fallthrough_with_branch -; CHECK: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %entry ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %entry ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %sw.0 +; CHECK: # %bb.{{[0-9]+}}: # %sw.0 ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} ; CHECK: # %sw.1 ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} ; CHECK: # %sw.default ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %if.then +; CHECK: # %bb.{{[0-9]+}}: # %if.then ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} ; CHECK: # %if.else ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} Index: llvm/trunk/test/CodeGen/PowerPC/tail-dup-layout.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/tail-dup-layout.ll +++ llvm/trunk/test/CodeGen/PowerPC/tail-dup-layout.ll @@ -278,7 +278,7 @@ ;CHECK: addi ;CHECK: .[[CHECKLABEL:[._0-9A-Za-z]+]]: # %for.check ;CHECK: lwz [[TAGREG:[0-9]+]], 0([[TAGPTRREG]]) -;CHECK: # %test1 +;CHECK: # 
%bb.{{[0-9]+}}: # %test1 ;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1 ;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[._0-9A-Za-z]+]] ;CHECK-NEXT: # %test2 @@ -366,12 +366,12 @@ ; code is independent of the outlining code, which works by choosing the ; "unavoidable" blocks. ; CHECK-LABEL: avoidable_test: -; CHECK: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %entry ; CHECK: andi. -; CHECK: # %test2 +; CHECK: # %bb.{{[0-9]+}}: # %test2 ; Make sure then2 falls through from test2 ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %then2 +; CHECK: # %bb.{{[0-9]+}}: # %then2 ; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29 ; CHECK: # %else1 ; CHECK: bl a @@ -420,8 +420,8 @@ ; The f;g->h;i trellis should be resolved as f->i;g->h. ; The h;i->j;ret trellis contains a triangle edge, and should be resolved as ; h->j->ret -; CHECK: # %entry -; CHECK: # %c10 +; CHECK: # %bb.{{[0-9]+}}: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %c10 ; CHECK: # %e9 ; CHECK: # %g10 ; CHECK: # %h10 @@ -504,8 +504,8 @@ ; checking, it's profitable to duplicate G into F. The weights here are not ; really important. They are there to help make the test stable. ; CHECK-LABEL: trellis_then_dup_test -; CHECK: # %entry -; CHECK: # %b +; CHECK: # %bb.{{[0-9]+}}: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %b ; CHECK: # %d ; CHECK: # %g ; CHECK: # %ret1 @@ -568,8 +568,8 @@ ; Verify that we did not mis-identify triangle trellises if it is not ; really a triangle. ; CHECK-LABEL: trellis_no_triangle -; CHECK: # %entry -; CHECK: # %b +; CHECK: # %bb.{{[0-9]+}}: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %b ; CHECK: # %d ; CHECK: # %ret ; CHECK: # %c Index: llvm/trunk/test/CodeGen/PowerPC/testBitReverse.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testBitReverse.ll +++ llvm/trunk/test/CodeGen/PowerPC/testBitReverse.ll @@ -4,7 +4,7 @@ declare i32 @llvm.bitreverse.i32(i32) define i32 @testBitReverseIntrinsicI32(i32 %arg) { ; CHECK-LABEL: testBitReverseIntrinsicI32: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: slwi 6, 3, 1 @@ -44,7 +44,7 @@ declare i64 @llvm.bitreverse.i64(i64) define i64 @testBitReverseIntrinsicI64(i64 %arg) { ; CHECK-LABEL: testBitReverseIntrinsicI64: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: lis 6, -13108 Index: llvm/trunk/test/CodeGen/PowerPC/testComparesi32gtu.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesi32gtu.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesi32gtu.ll @@ -11,7 +11,7 @@ ; Function Attrs: nounwind define i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) { ; CHECK-LABEL: testCompare1: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lbz r3, 0(r3) ; CHECK-DAG: clrlwi r3, r3, 31 ; CHECK-DAG: clrldi r3, r3, 32 @@ -35,7 +35,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @testCompare2(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: testCompare2: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-DAG: rlwinm r3, r3, 0, 31, 31 ; CHECK-DAG: rlwinm r4, r4, 0, 31, 31 ; CHECK-DAG: clrldi r3, r3, 32 Index: llvm/trunk/test/CodeGen/PowerPC/testComparesi32leu.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesi32leu.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesi32leu.ll @@ -8,7 +8,7 @@ define signext i32 @test(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: 
test: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: rlwinm r3, r3, 0, 31, 31 ; CHECK-NEXT: rlwinm r4, r4, 0, 31, 31 ; CHECK-NEXT: clrldi r3, r3, 32 Index: llvm/trunk/test/CodeGen/PowerPC/testComparesi32ltu.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesi32ltu.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesi32ltu.ll @@ -11,7 +11,7 @@ ; Function Attrs: nounwind define i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) { ; CHECK-LABEL: testCompare1: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lbz r3, 0(r3) ; CHECK-DAG: clrlwi r3, r3, 31 ; CHECK-DAG: clrldi r3, r3, 32 @@ -35,7 +35,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @testCompare2(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: testCompare2: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-DAG: rlwinm r3, r3, 0, 31, 31 ; CHECK-DAG: rlwinm r4, r4, 0, 31, 31 ; CHECK-DAG: clrldi r3, r3, 32 Index: llvm/trunk/test/CodeGen/PowerPC/testComparesieqsc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesieqsc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesieqsc.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc_z(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc_sext_z(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsc_z_store(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsc_sext_z_store(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesieqsi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesieqsi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesieqsi.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi_z(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi_sext_z(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsi_z_store(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsi_sext_z_store(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesieqsll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesieqsll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesieqsll.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic 
r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -40,7 +40,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll_z(i64 %a) { ; CHECK-LABEL: test_ieqsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll_sext_z(i64 %a) { ; CHECK-LABEL: test_ieqsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -66,7 +66,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -84,7 +84,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -102,7 +102,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsll_z_store(i64 %a) { ; CHECK-LABEL: test_ieqsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -119,7 +119,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqsll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_ieqsll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesieqss.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesieqss.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesieqss.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss_z(i16 signext %a) { ; CHECK-LABEL: test_ieqss_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss_sext_z(i16 signext %a) { ; CHECK-LABEL: test_ieqss_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ ; 
Function Attrs: norecurse nounwind define void @test_ieqss_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqss_z_store(i16 signext %a) { ; CHECK-LABEL: test_ieqss_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void @test_ieqss_sext_z_store(i16 signext %a) { ; CHECK-LABEL: test_ieqss_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiequc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiequc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiequc.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc_z(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc_sext_z(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequc_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequc_sext_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: 
llvm/trunk/test/CodeGen/PowerPC/testComparesiequi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiequi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiequi.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi_sext(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi_z(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi_sext_z(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequi_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequi_sext_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequi_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequi_sext_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiequll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiequll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiequll.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequll(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -40,7 +40,7 @@ ; Function Attrs: norecurse nounwind readnone define signext 
i32 @test_iequll_z(i64 %a) { ; CHECK-LABEL: test_iequll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequll_sext_z(i64 %a) { ; CHECK-LABEL: test_iequll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -66,7 +66,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -84,7 +84,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -102,7 +102,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequll_z_store(i64 %a) { ; CHECK-LABEL: test_iequll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -119,7 +119,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_iequll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiequs.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiequs.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiequs.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_iequs: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_iequs_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs_z(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequs_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_iequs_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequs_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: 
test_iequs_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequs_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void @test_iequs_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesigesc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigesc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigesc.ll @@ -9,7 +9,7 @@ define signext i32 @test_igesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ define signext i32 @test_igesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_igesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_igesc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesigesi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigesi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigesi.ll @@ -9,7 +9,7 @@ define signext i32 @test_igesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ define signext i32 @test_igesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_igesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_igesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesigesll.ll 
=================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigesll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigesll.ll @@ -9,7 +9,7 @@ define signext i32 @test_igesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -23,7 +23,7 @@ define signext i32 @test_igesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -38,7 +38,7 @@ define signext i32 @test_igesll_z(i64 %a) { ; CHECK-LABEL: test_igesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 ; CHECK-NEXT: blr @@ -50,7 +50,7 @@ define signext i32 @test_igesll_sext_z(i64 %a) { ; CHECK-LABEL: test_igesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: not r3, r3 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ define void @test_igesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r3, 63 ; CHECK: subfc r3, r4, r3 ; CHECK: rldicl r3, r4, 1, 63 @@ -78,7 +78,7 @@ define void @test_igesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r6, r3, 63 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: subfc r3, r4, r3 @@ -97,7 +97,7 @@ define void @test_igesll_z_store(i64 %a) { ; CHECK-LABEL: test_igesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -113,7 +113,7 @@ define void @test_igesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_igesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesigess.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigess.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigess.ll @@ -9,7 +9,7 @@ define signext i32 @test_igess(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ define signext i32 @test_igess_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_igess_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_igess_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: 
llvm/trunk/test/CodeGen/PowerPC/testComparesigtsc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigtsc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigtsc.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsc_z(i8 signext %a) { ; CHECK-LABEL: test_igtsc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -75,7 +75,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -89,7 +89,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsc_z_store(i8 signext %a) { ; CHECK-LABEL: test_igtsc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesigtsi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigtsi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigtsi.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsi_z(i32 signext %a) { ; CHECK-LABEL: test_igtsi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -75,7 +75,7 @@ ; Function Attrs: norecurse nounwind define void 
@test_igtsi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -89,7 +89,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsi_z_store(i32 signext %a) { ; CHECK-LABEL: test_igtsi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesigtsll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigtsll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigtsll.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -44,7 +44,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsll_z(i64 %a) { ; CHECK-LABEL: test_igtsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: nor r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -70,7 +70,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -87,7 +87,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtsll_z_store(i64 %a) { ; CHECK-LABEL: test_igtsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesigtss.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesigtss.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesigtss.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG1:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG1]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; 
CHECK-NEXT: blr @@ -37,7 +37,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss_z(i16 signext %a) { ; CHECK-LABEL: test_igtss_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: blr @@ -50,7 +50,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss_sext_z(i16 signext %a) { ; CHECK-LABEL: test_igtss_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: neg [[REG2:r[0-9]+]], r3 ; CHECK-NEXT: sradi r3, [[REG2]], 63 ; CHECK-NEXT: blr @@ -63,7 +63,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG1:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG1]], 1, 63 entry: @@ -76,7 +76,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtss_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -90,7 +90,7 @@ ; Function Attrs: norecurse nounwind define void @test_igtss_z_store(i16 signext %a) { ; CHECK-LABEL: test_igtss_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesilesc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesilesc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesilesc.ll @@ -9,7 +9,7 @@ define signext i32 @test_ilesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ define signext i32 @test_ilesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_ilesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_ilesc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesilesi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesilesi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesilesi.ll @@ -9,7 +9,7 @@ define signext i32 @test_ilesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ define signext i32 @test_ilesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; 
CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_ilesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_ilesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesilesll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesilesll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesilesll.ll @@ -9,7 +9,7 @@ define signext i32 @test_ilesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -23,7 +23,7 @@ define signext i32 @test_ilesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -38,7 +38,7 @@ define signext i32 @test_ilesll_z(i64 %a) { ; CHECK-LABEL: test_ilesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -51,7 +51,7 @@ define signext i32 @test_ilesll_sext_z(i64 %a) { ; CHECK-LABEL: test_ilesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: sradi r3, r3, 63 @@ -64,7 +64,7 @@ define void @test_ilesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK: subfc r4, r3, r4 ; CHECK: rldicl r3, r3, 1, 63 @@ -80,7 +80,7 @@ define void @test_ilesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK-DAG: rldicl r3, r3, 1, 63 ; CHECK-DAG: subfc r4, r3, r4 @@ -97,7 +97,7 @@ define void @test_ilesll_z_store(i64 %a) { ; CHECK-LABEL: test_ilesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -114,7 +114,7 @@ define void @test_ilesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_ilesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiless.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiless.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiless.ll @@ -9,7 +9,7 @@ define signext i32 @test_iless(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iless: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ define signext i32 @test_iless_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iless_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; 
CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_iless_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iless_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_iless_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iless_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiltsc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiltsc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiltsc.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -24,7 +24,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ ; Function Attrs: norecurse nounwind define void @test_iltsc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -61,7 +61,7 @@ ; Function Attrs: norecurse nounwind define void @test_iltsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiltsi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiltsi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiltsi.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -24,7 +24,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsi_sext_z(i32 signext %a) { ; CHECK-LABEL: test_iltsi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: srawi r3, r3, 31 ; CHECK-NEXT: blr entry: @@ -49,7 +49,7 @@ ; Function Attrs: norecurse nounwind define void @test_iltsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -62,7 +62,7 @@ ; Function Attrs: 
norecurse nounwind define void @test_iltsi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiltsll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiltsll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiltsll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -27,7 +27,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -44,7 +44,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsll_sext_z(i64 %a) { ; CHECK-LABEL: test_iltsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: blr entry: @@ -56,7 +56,7 @@ ; Function Attrs: norecurse nounwind define void @test_iltsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 @@ -73,7 +73,7 @@ ; Function Attrs: norecurse nounwind define void @test_iltsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 Index: llvm/trunk/test/CodeGen/PowerPC/testComparesiltss.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesiltss.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesiltss.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iltss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -24,7 +24,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iltss_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ ; Function Attrs: norecurse nounwind define void @test_iltss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iltss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -61,7 +61,7 @@ ; Function Attrs: norecurse nounwind define void @test_iltss_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iltss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, 
[[REG]], 63 entry: Index: llvm/trunk/test/CodeGen/PowerPC/testComparesinesll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesinesll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesinesll.ll @@ -10,7 +10,7 @@ define signext i32 @test_inesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ define signext i32 @test_inesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ define signext i32 @test_inesll_z(i64 %a) { ; CHECK-LABEL: test_inesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ define signext i32 @test_inesll_sext_z(i64 %a) { ; CHECK-LABEL: test_inesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ define void @test_inesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ define void @test_inesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ define void @test_inesll_z_store(i64 %a) { ; CHECK-LABEL: test_inesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ define void @test_inesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_inesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesineuc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesineuc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesineuc.ll @@ -9,7 +9,7 @@ define signext i32 @test_ineuc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -23,7 +23,7 @@ define signext i32 @test_ineuc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -38,7 +38,7 @@ define signext i32 @test_ineuc_z(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -51,7 +51,7 @@ define signext i32 @test_ineuc_sext_z(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -65,7 +65,7 @@ define void 
@test_ineuc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -83,7 +83,7 @@ define void @test_ineuc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -102,7 +102,7 @@ define void @test_ineuc_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -119,7 +119,7 @@ define void @test_ineuc_sext_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 Index: llvm/trunk/test/CodeGen/PowerPC/testComparesineull.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesineull.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesineull.ll @@ -10,7 +10,7 @@ define signext i32 @test_ineull(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ define signext i32 @test_ineull_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ define signext i32 @test_ineull_z(i64 %a) { ; CHECK-LABEL: test_ineull_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ define signext i32 @test_ineull_sext_z(i64 %a) { ; CHECK-LABEL: test_ineull_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ define void @test_ineull_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ define void @test_ineull_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ define void @test_ineull_z_store(i64 %a) { ; CHECK-LABEL: test_ineull_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ define void @test_ineull_sext_z_store(i64 %a) { ; CHECK-LABEL: test_ineull_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesineus.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesineus.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesineus.ll @@ 
-10,7 +10,7 @@ define signext i32 @test_ineus(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -24,7 +24,7 @@ define signext i32 @test_ineus_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -39,7 +39,7 @@ define signext i32 @test_ineus_z(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -52,7 +52,7 @@ define signext i32 @test_ineus_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -66,7 +66,7 @@ define void @test_ineus_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -84,7 +84,7 @@ define void @test_ineus_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -103,7 +103,7 @@ define void @test_ineus_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -120,7 +120,7 @@ define void @test_ineus_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 Index: llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsc.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc_z(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc_sext_z(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsc_store(i8 signext 
%a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsc_z_store(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsc_sext_z_store(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsi.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi_z(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi_sext_z(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsi_z_store(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void 
@test_lleqsi_sext_z_store(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testCompareslleqsll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -39,7 +39,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll_z(i64 %a) { ; CHECK-LABEL: test_lleqsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ -52,7 +52,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll_sext_z(i64 %a) { ; CHECK-LABEL: test_lleqsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -65,7 +65,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -83,7 +83,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -101,7 +101,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsll_z_store(i64 %a) { ; CHECK-LABEL: test_lleqsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -118,7 +118,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqsll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_lleqsll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testCompareslleqss.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testCompareslleqss.ll +++ llvm/trunk/test/CodeGen/PowerPC/testCompareslleqss.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss_sext: -; CHECK: # BB#0: # 
%entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqss_z(i16 signext %a) { ; CHECK-LABEL: test_lleqss_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqss_sext_z(i16 signext %a) { ; CHECK-LABEL: test_lleqss_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqss_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqss_z_store(i16 signext %a) { ; CHECK-LABEL: test_lleqss_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ ; Function Attrs: norecurse nounwind define void @test_lleqss_sext_z_store(i16 signext %a) { ; CHECK-LABEL: test_lleqss_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllequc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllequc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllequc.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequc_z(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequc_sext_z(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; 
CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequc_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequc_sext_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllequi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllequi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllequi.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi_sext(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi_z(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi_sext_z(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequi_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequi_sext_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequi_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequi_sext_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, 
.LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllequll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllequll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllequll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -39,7 +39,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll_z(i64 %a) { ; CHECK-LABEL: test_llequll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ -52,7 +52,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll_sext_z(i64 %a) { ; CHECK-LABEL: test_llequll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -65,7 +65,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -83,7 +83,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -101,7 +101,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequll_z_store(i64 %a) { ; CHECK-LABEL: test_llequll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -118,7 +118,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_llequll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllequs.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllequs.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllequs.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequs(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequs_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ ; Function Attrs: norecurse 
nounwind readnone define i64 @test_llequs_z(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequs_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequs_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequs_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequs_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ ; Function Attrs: norecurse nounwind define void @test_llequs_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllgesc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllgesc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllgesc.ll @@ -9,7 +9,7 @@ define i64 @test_llgesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_llgesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ define i64 @test_llgesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_llgesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_llgesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_llgesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_llgesc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_llgesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllgesi.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllgesi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllgesi.ll @@ -9,7 +9,7 @@ define i64 @test_llgesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori 
r3, r3, 1 @@ -22,7 +22,7 @@ define i64 @test_llgesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_llgesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_llgesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllgesll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllgesll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllgesll.ll @@ -9,7 +9,7 @@ define i64 @test_llgesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -23,7 +23,7 @@ define i64 @test_llgesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -38,7 +38,7 @@ define i64 @test_llgesll_z(i64 %a) { ; CHECK-LABEL: test_llgesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 ; CHECK-NEXT: blr @@ -50,7 +50,7 @@ define i64 @test_llgesll_sext_z(i64 %a) { ; CHECK-LABEL: test_llgesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: not r3, r3 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ define void @test_llgesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r3, 63 ; CHECK: subfc r3, r4, r3 ; CHECK: rldicl r3, r4, 1, 63 @@ -78,7 +78,7 @@ define void @test_llgesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r6, r3, 63 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: subfc r3, r4, r3 @@ -97,7 +97,7 @@ define void @test_llgesll_z_store(i64 %a) { ; CHECK-LABEL: test_llgesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -113,7 +113,7 @@ define void @test_llgesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_llgesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllgess.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllgess.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllgess.ll @@ -9,7 +9,7 @@ define i64 @test_llgess(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: 
xori r3, r3, 1 @@ -22,7 +22,7 @@ define i64 @test_llgess_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ define void @test_llgess_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ define void @test_llgess_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllgtsll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllgtsll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllgtsll.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -26,7 +26,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -44,7 +44,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtsll_z(i64 %a) { ; CHECK-LABEL: test_llgtsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: nor r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -70,7 +70,7 @@ ; Function Attrs: norecurse nounwind define void @test_llgtsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -87,7 +87,7 @@ ; Function Attrs: norecurse nounwind define void @test_llgtsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -105,7 +105,7 @@ ; Function Attrs: norecurse nounwind define void @test_llgtsll_z_store(i64 %a) { ; CHECK-LABEL: test_llgtsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllgtus.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllgtus.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllgtus.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ 
-23,7 +23,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -36,7 +36,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus_z(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -50,7 +50,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -65,7 +65,7 @@ ; Function Attrs: norecurse nounwind define void @test_llgtus_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -78,7 +78,7 @@ ; Function Attrs: norecurse nounwind define void @test_llgtus_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -91,7 +91,7 @@ ; Function Attrs: norecurse nounwind define void @test_llgtus_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -109,7 +109,7 @@ ; Function Attrs: norecurse nounwind define void @test_llgtus_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 Index: llvm/trunk/test/CodeGen/PowerPC/testCompareslllesc.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testCompareslllesc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testCompareslllesc.ll @@ -10,7 +10,7 @@ define i64 @test_lllesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -23,7 +23,7 @@ define i64 @test_lllesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -36,7 +36,7 @@ define void @test_lllesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -53,7 +53,7 @@ define void @test_lllesc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testCompareslllesi.ll =================================================================== --- 
llvm/trunk/test/CodeGen/PowerPC/testCompareslllesi.ll +++ llvm/trunk/test/CodeGen/PowerPC/testCompareslllesi.ll @@ -10,7 +10,7 @@ define i64 @test_lllesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -23,7 +23,7 @@ define i64 @test_lllesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -36,7 +36,7 @@ define void @test_lllesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -53,7 +53,7 @@ define void @test_lllesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testCompareslllesll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testCompareslllesll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testCompareslllesll.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_lllesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -25,7 +25,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_lllesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -41,7 +41,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll_z(i64 %a) { ; CHECK-LABEL: test_lllesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -55,7 +55,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll_sext_z(i64 %a) { ; CHECK-LABEL: test_lllesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: sradi r3, r3, 63 @@ -69,7 +69,7 @@ ; Function Attrs: norecurse nounwind define void @test_lllesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_lllesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK: subfc r4, r3, r4 ; CHECK: rldicl r3, r3, 1, 63 @@ -86,7 +86,7 @@ ; Function Attrs: norecurse nounwind define void @test_lllesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_lllesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK-DAG: rldicl r3, r3, 1, 63 ; CHECK-DAG: subfc r4, r3, r4 @@ -104,7 +104,7 @@ ; Function Attrs: norecurse nounwind define void @test_lllesll_z_store(i64 %a) { ; CHECK-LABEL: test_lllesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ ; Function Attrs: norecurse nounwind define void 
@test_lllesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_lllesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllless.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllless.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllless.ll @@ -10,7 +10,7 @@ define i64 @test_llless(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -23,7 +23,7 @@ define i64 @test_llless_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -36,7 +36,7 @@ define void @test_llless_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -53,7 +53,7 @@ define void @test_llless_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllltsll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllltsll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltsll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -27,7 +27,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -44,7 +44,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltsll_sext_z(i64 %a) { ; CHECK-LABEL: test_llltsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: blr entry: @@ -56,7 +56,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 @@ -73,7 +73,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllltuc.ll =================================================================== --- 
llvm/trunk/test/CodeGen/PowerPC/testComparesllltuc.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltuc.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltuc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltuc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -36,7 +36,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltuc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[2-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -49,7 +49,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltuc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllltui.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllltui.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltui.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NOT: clrldi ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 @@ -24,7 +24,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui_sext(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui_z(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li r3, 0 ; CHECK-NEXT: blr entry: @@ -47,7 +47,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui_sext_z(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li r3, 0 ; CHECK-NEXT: blr entry: @@ -57,7 +57,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltui_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NOT: clrldi ; CHECK: sub [[REG:r[2-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 @@ -71,7 +71,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltui_sext_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NOT: clrldi ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 @@ -85,7 +85,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltui_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: li [[REG:r[0-9]+]], 0 ; CHECK: stw [[REG]], 
0(r3) ; CHECK-NEXT: blr @@ -97,7 +97,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltui_sext_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: li [[REG:r[0-9]+]], 0 ; CHECK: stw [[REG]], 0(r3) ; CHECK-NEXT: blr Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllltus.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllltus.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllltus.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltus(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llltus: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltus_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llltus_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ ; Function Attrs: norecurse nounwind define void @test_llltus_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llltus_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllnesll.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllnesll.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllnesll.ll @@ -10,7 +10,7 @@ define i64 @test_llnesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ define i64 @test_llnesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ define i64 @test_llnesll_z(i64 %a) { ; CHECK-LABEL: test_llnesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ define i64 @test_llnesll_sext_z(i64 %a) { ; CHECK-LABEL: test_llnesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ define void @test_llnesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ define void @test_llnesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ define void @test_llnesll_z_store(i64 %a) { ; CHECK-LABEL: test_llnesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ define void @test_llnesll_sext_z_store(i64 %a) { ; CHECK-LABEL: 
test_llnesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/testComparesllneull.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/testComparesllneull.ll +++ llvm/trunk/test/CodeGen/PowerPC/testComparesllneull.ll @@ -10,7 +10,7 @@ define i64 @test_llneull(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ define i64 @test_llneull_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ define i64 @test_llneull_z(i64 %a) { ; CHECK-LABEL: test_llneull_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ define i64 @test_llneull_sext_z(i64 %a) { ; CHECK-LABEL: test_llneull_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ define void @test_llneull_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ define void @test_llneull_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ define void @test_llneull_z_store(i64 %a) { ; CHECK-LABEL: test_llneull_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ define void @test_llneull_sext_z_store(i64 %a) { ; CHECK-LABEL: test_llneull_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) Index: llvm/trunk/test/CodeGen/PowerPC/vec_add_sub_quadword.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/vec_add_sub_quadword.ll +++ llvm/trunk/test/CodeGen/PowerPC/vec_add_sub_quadword.ll @@ -8,7 +8,7 @@ %result = add <1 x i128> %x, %tmpvec ret <1 x i128> %result ; CHECK-LABEL: @out_of_bounds_insertelement -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: blr } Index: llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9.ll +++ llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9.ll @@ -4,12 +4,12 @@ define zeroext i8 @test1(<16 x i8> %a, i32 signext %index) { ; CHECK-LE-LABEL: test1: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 56 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test1: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 56 ; CHECK-BE-NEXT: blr @@ -21,12 +21,12 @@ 
define signext i8 @test2(<16 x i8> %a, i32 signext %index) { ; CHECK-LE-LABEL: test2: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: extsb 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test2: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: extsb 3, 3 ; CHECK-BE-NEXT: blr @@ -38,13 +38,13 @@ define zeroext i16 @test3(<8 x i16> %a, i32 signext %index) { ; CHECK-LE-LABEL: test3: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 48 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test3: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 48 @@ -57,13 +57,13 @@ define signext i16 @test4(<8 x i16> %a, i32 signext %index) { ; CHECK-LE-LABEL: test4: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: extsh 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test4: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: extsh 3, 3 @@ -76,12 +76,12 @@ define zeroext i32 @test5(<4 x i32> %a, i32 signext %index) { ; CHECK-LE-LABEL: test5: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test5: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: blr @@ -93,13 +93,13 @@ define signext i32 @test6(<4 x i32> %a, i32 signext %index) { ; CHECK-LE-LABEL: test6: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test6: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: extsw 3, 3 @@ -113,13 +113,13 @@ ; Test with immediate index define zeroext i8 @test7(<16 x i8> %a) { ; CHECK-LE-LABEL: test7: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 1 ; CHECK-LE-NEXT: vextubrx 3, 3, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 56 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test7: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 1 ; CHECK-BE-NEXT: vextublx 3, 3, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 56 @@ -132,13 +132,13 @@ define zeroext i16 @test8(<8 x i16> %a) { ; CHECK-LE-LABEL: test8: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 2 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 48 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test8: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 2 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 48 @@ -151,12 +151,12 @@ define zeroext i32 @test9(<4 x i32> %a) { ; CHECK-LE-LABEL: test9: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 12 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test9: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 12 ; 
CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: blr Index: llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9_2.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9_2.ll +++ llvm/trunk/test/CodeGen/PowerPC/vec_extract_p9_2.ll @@ -4,13 +4,13 @@ define zeroext i8 @test_add1(<16 x i8> %a, i32 signext %index, i8 zeroext %c) { ; CHECK-LE-LABEL: test_add1: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: rlwinm 3, 3, 0, 24, 31 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add1: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: add 3, 3, 6 ; CHECK-BE-NEXT: rlwinm 3, 3, 0, 24, 31 @@ -26,13 +26,13 @@ define signext i8 @test_add2(<16 x i8> %a, i32 signext %index, i8 signext %c) { ; CHECK-LE-LABEL: test_add2: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: extsb 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add2: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: add 3, 3, 6 ; CHECK-BE-NEXT: extsb 3, 3 @@ -48,14 +48,14 @@ define zeroext i16 @test_add3(<8 x i16> %a, i32 signext %index, i16 zeroext %c) { ; CHECK-LE-LABEL: test_add3: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: rlwinm 3, 3, 0, 16, 31 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add3: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -72,14 +72,14 @@ define signext i16 @test_add4(<8 x i16> %a, i32 signext %index, i16 signext %c) { ; CHECK-LE-LABEL: test_add4: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: extsh 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add4: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -96,14 +96,14 @@ define zeroext i32 @test_add5(<4 x i32> %a, i32 signext %index, i32 zeroext %c) { ; CHECK-LE-LABEL: test_add5: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: clrldi 3, 3, 32 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add5: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -117,14 +117,14 @@ define signext i32 @test_add6(<4 x i32> %a, i32 signext %index, i32 signext %c) { ; CHECK-LE-LABEL: test_add6: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add6: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -139,11 +139,11 @@ ; When extracting word element 2 on LE, it's better to use mfvsrwz rather than 
vextuwrx define zeroext i32 @test7(<4 x i32> %a) { ; CHECK-LE-LABEL: test7: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test7: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: blr @@ -154,13 +154,13 @@ define zeroext i32 @testadd_7(<4 x i32> %a, i32 zeroext %c) { ; CHECK-LE-LABEL: testadd_7: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: add 3, 3, 5 ; CHECK-LE-NEXT: clrldi 3, 3, 32 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: testadd_7: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 5 @@ -174,12 +174,12 @@ define signext i32 @test8(<4 x i32> %a) { ; CHECK-LE-LABEL: test8: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test8: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: extsw 3, 3 @@ -191,13 +191,13 @@ define signext i32 @testadd_8(<4 x i32> %a, i32 signext %c) { ; CHECK-LE-LABEL: testadd_8: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: add 3, 3, 5 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: testadd_8: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 5 @@ -212,13 +212,13 @@ ; When extracting word element 1 on BE, it's better to use mfvsrwz rather than vextuwlx define signext i32 @test9(<4 x i32> %a) { ; CHECK-LE-LABEL: test9: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 4 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test9: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: mfvsrwz 3, 34 ; CHECK-BE-NEXT: extsw 3, 3 ; CHECK-BE-NEXT: blr @@ -229,14 +229,14 @@ define signext i32 @testadd_9(<4 x i32> %a, i32 signext %c) { ; CHECK-LE-LABEL: testadd_9: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 4 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 5 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: testadd_9: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: mfvsrwz 3, 34 ; CHECK-BE-NEXT: add 3, 3, 5 ; CHECK-BE-NEXT: extsw 3, 3 Index: llvm/trunk/test/CodeGen/PowerPC/vec_int_ext.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/vec_int_ext.ll +++ llvm/trunk/test/CodeGen/PowerPC/vec_int_ext.ll @@ -4,11 +4,11 @@ define <4 x i32> @vextsb2wLE(<16 x i8> %a) { ; CHECK-LE-LABEL: vextsb2wLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsb2w 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsb2wLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsb2w 2, 2 ; CHECK-BE-NEXT: blr @@ -31,11 +31,11 @@ define <2 x i64> @vextsb2dLE(<16 x i8> %a) { ; CHECK-LE-LABEL: vextsb2dLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsb2d 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsb2dLE: -; CHECK-BE: # BB#0: # 
%entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsb2d 2, 2 ; CHECK-BE-NEXT: blr @@ -52,11 +52,11 @@ define <4 x i32> @vextsh2wLE(<8 x i16> %a) { ; CHECK-LE-LABEL: vextsh2wLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsh2w 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsh2wLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsh2w 2, 2 ; CHECK-BE-NEXT: blr @@ -79,11 +79,11 @@ define <2 x i64> @vextsh2dLE(<8 x i16> %a) { ; CHECK-LE-LABEL: vextsh2dLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsh2d 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsh2dLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsh2d 2, 2 ; CHECK-BE-NEXT: blr @@ -100,11 +100,11 @@ define <2 x i64> @vextsw2dLE(<4 x i32> %a) { ; CHECK-LE-LABEL: vextsw2dLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsw2d 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsw2dLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vmrgew ; CHECK-BE-NEXT: vextsw2d 2, 2 ; CHECK-BE-NEXT: blr @@ -121,11 +121,11 @@ define <4 x i32> @vextsb2wBE(<16 x i8> %a) { ; CHECK-BE-LABEL: vextsb2wBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsb2w 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsb2wBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 13 ; CHECK-LE-NEXT: vextsb2w 2, 2 ; CHECK-LE-NEXT: blr @@ -147,11 +147,11 @@ define <2 x i64> @vextsb2dBE(<16 x i8> %a) { ; CHECK-BE-LABEL: vextsb2dBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsb2d 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsb2dBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 9 ; CHECK-LE-NEXT: vextsb2d 2, 2 ; CHECK-LE-NEXT: blr @@ -167,11 +167,11 @@ define <4 x i32> @vextsh2wBE(<8 x i16> %a) { ; CHECK-BE-LABEL: vextsh2wBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsh2w 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsh2wBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 14 ; CHECK-LE-NEXT: vextsh2w 2, 2 ; CHECK-LE-NEXT: blr @@ -193,11 +193,11 @@ define <2 x i64> @vextsh2dBE(<8 x i16> %a) { ; CHECK-BE-LABEL: vextsh2dBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsh2d 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsh2dBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 10 ; CHECK-LE-NEXT: vextsh2d 2, 2 ; CHECK-LE-NEXT: blr @@ -213,11 +213,11 @@ define <2 x i64> @vextsw2dBE(<4 x i32> %a) { ; CHECK-BE-LABEL: vextsw2dBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsw2d 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsw2dBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 12 ; CHECK-LE-NEXT: vextsw2d 2, 2 ; CHECK-LE-NEXT: blr @@ -233,11 +233,11 @@ define <2 x i64> @vextDiffVectors(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LE-LABEL: vextDiffVectors: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NOT: vextsw2d ; CHECK-BE-LABEL: vextDiffVectors: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NOT: vextsw2d entry: %vecext = extractelement 
<4 x i32> %a, i32 0 @@ -252,11 +252,11 @@ define <8 x i16> @testInvalidExtend(<16 x i8> %a) { entry: ; CHECK-LE-LABEL: testInvalidExtend: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NOT: vexts ; CHECK-BE-LABEL: testInvalidExtend: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NOT: vexts %vecext = extractelement <16 x i8> %a, i32 0 Index: llvm/trunk/test/CodeGen/PowerPC/vec_revb.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/vec_revb.ll +++ llvm/trunk/test/CodeGen/PowerPC/vec_revb.ll @@ -3,7 +3,7 @@ define <8 x i16> @testXXBRH(<8 x i16> %a) { ; CHECK-LABEL: testXXBRH: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrh 34, 34 ; CHECK-NEXT: blr @@ -16,7 +16,7 @@ define <4 x i32> @testXXBRW(<4 x i32> %a) { ; CHECK-LABEL: testXXBRW: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrw 34, 34 ; CHECK-NEXT: blr @@ -29,7 +29,7 @@ define <2 x double> @testXXBRD(<2 x double> %a) { ; CHECK-LABEL: testXXBRD: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrd 34, 34 ; CHECK-NEXT: blr @@ -42,7 +42,7 @@ define <1 x i128> @testXXBRQ(<1 x i128> %a) { ; CHECK-LABEL: testXXBRQ: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrq 34, 34 ; CHECK-NEXT: blr Index: llvm/trunk/test/CodeGen/PowerPC/vselect-constants.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/vselect-constants.ll +++ llvm/trunk/test/CodeGen/PowerPC/vselect-constants.ll @@ -9,7 +9,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_C1_or_C2_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, -16 ; CHECK-NEXT: vspltisw 4, 15 ; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha @@ -29,7 +29,7 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_C1_or_C2_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: addis 3, 2, .LCPI1_0@toc@ha ; CHECK-NEXT: addis 4, 2, .LCPI1_1@toc@ha @@ -46,7 +46,7 @@ define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_Cplus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: addis 3, 2, .LCPI2_0@toc@ha ; CHECK-NEXT: addi 3, 3, .LCPI2_0@toc@l @@ -60,7 +60,7 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_Cplus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: addis 3, 2, .LCPI3_0@toc@ha ; CHECK-NEXT: addi 3, 3, .LCPI3_0@toc@l @@ -74,7 +74,7 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_Cminus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, -16 ; CHECK-NEXT: vspltisw 4, 15 ; CHECK-NEXT: addis 3, 2, .LCPI4_0@toc@ha @@ -91,7 +91,7 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_Cminus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: addis 3, 2, .LCPI5_0@toc@ha ; CHECK-NEXT: addi 3, 3, .LCPI5_0@toc@l @@ -105,7 +105,7 @@ define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_minus1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, -16 ; CHECK-NEXT: vspltisw 4, 15 ; CHECK-NEXT: vsubuwm 3, 4, 3 @@ -118,7 +118,7 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: 
cmp_sel_minus1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: blr %cond = icmp eq <4 x i32> %x, %y @@ -128,7 +128,7 @@ define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_0_or_minus1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 @@ -140,7 +140,7 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_0_or_minus1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: xxlnor 34, 34, 34 ; CHECK-NEXT: blr @@ -151,7 +151,7 @@ define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: blr @@ -161,7 +161,7 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: vspltisw 19, 1 ; CHECK-NEXT: xxland 34, 34, 51 @@ -173,7 +173,7 @@ define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_0_or_1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: xxlandc 34, 35, 34 ; CHECK-NEXT: blr @@ -183,7 +183,7 @@ define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_0_or_1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: vspltisw 19, 1 ; CHECK-NEXT: xxlnor 0, 34, 34 Index: llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll +++ llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll @@ -6,7 +6,7 @@ define i64 @addc_adde(i64 %a, i64 %b) { ; RV32I-LABEL: addc_adde: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: add a1, a1, a3 ; RV32I-NEXT: add a2, a0, a2 ; RV32I-NEXT: sltu a0, a2, a0 @@ -19,7 +19,7 @@ define i64 @subc_sube(i64 %a, i64 %b) { ; RV32I-LABEL: subc_sube: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sltu a3, a0, a2 ; RV32I-NEXT: sub a1, a1, a3 Index: llvm/trunk/test/CodeGen/RISCV/alu32.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/alu32.ll +++ llvm/trunk/test/CodeGen/RISCV/alu32.ll @@ -10,7 +10,7 @@ define i32 @addi(i32 %a) nounwind { ; RV32I-LABEL: addi: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, 1 @@ -19,7 +19,7 @@ define i32 @slti(i32 %a) nounwind { ; RV32I-LABEL: slti: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slti a0, a0, 2 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp slt i32 %a, 2 @@ -29,7 +29,7 @@ define i32 @sltiu(i32 %a) nounwind { ; RV32I-LABEL: sltiu: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltiu a0, a0, 3 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ult i32 %a, 3 @@ -39,7 +39,7 @@ define i32 @xori(i32 %a) nounwind { ; RV32I-LABEL: xori: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xori a0, a0, 4 ; RV32I-NEXT: jalr zero, ra, 0 %1 = xor i32 %a, 4 @@ -48,7 +48,7 @@ define i32 @ori(i32 %a) nounwind { ; RV32I-LABEL: ori: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: ori a0, a0, 5 ; RV32I-NEXT: jalr zero, ra, 0 %1 = or i32 %a, 5 @@ -57,7 +57,7 @@ define i32 @andi(i32 %a) nounwind { ; RV32I-LABEL: andi: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 6 ; RV32I-NEXT: jalr 
zero, ra, 0 %1 = and i32 %a, 6 @@ -66,7 +66,7 @@ define i32 @slli(i32 %a) nounwind { ; RV32I-LABEL: slli: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 7 ; RV32I-NEXT: jalr zero, ra, 0 %1 = shl i32 %a, 7 @@ -75,7 +75,7 @@ define i32 @srli(i32 %a) nounwind { ; RV32I-LABEL: srli: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 8 ; RV32I-NEXT: jalr zero, ra, 0 %1 = lshr i32 %a, 8 @@ -84,7 +84,7 @@ define i32 @srai(i32 %a) nounwind { ; RV32I-LABEL: srai: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srai a0, a0, 9 ; RV32I-NEXT: jalr zero, ra, 0 %1 = ashr i32 %a, 9 @@ -95,7 +95,7 @@ define i32 @add(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: add: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, %b @@ -104,7 +104,7 @@ define i32 @sub(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sub: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = sub i32 %a, %b @@ -113,7 +113,7 @@ define i32 @sll(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sll: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sll a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = shl i32 %a, %b @@ -122,7 +122,7 @@ define i32 @slt(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: slt: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp slt i32 %a, %b @@ -132,7 +132,7 @@ define i32 @sltu(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sltu: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ult i32 %a, %b @@ -142,7 +142,7 @@ define i32 @xor(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: xor: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = xor i32 %a, %b @@ -151,7 +151,7 @@ define i32 @srl(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: srl: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srl a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = lshr i32 %a, %b @@ -160,7 +160,7 @@ define i32 @sra(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sra: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sra a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = ashr i32 %a, %b @@ -169,7 +169,7 @@ define i32 @or(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: or: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = or i32 %a, %b @@ -178,7 +178,7 @@ define i32 @and(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: and: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = and i32 %a, %b Index: llvm/trunk/test/CodeGen/RISCV/bare-select.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/bare-select.ll +++ llvm/trunk/test/CodeGen/RISCV/bare-select.ll @@ -4,10 +4,10 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) { ; RV32I-LABEL: bare_select: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: bne a0, zero, .LBB0_2 -; RV32I-NEXT: # BB#1: +; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: addi a1, a2, 0 ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: addi a0, a1, 0 Index: llvm/trunk/test/CodeGen/RISCV/blockaddress.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/blockaddress.ll +++ llvm/trunk/test/CodeGen/RISCV/blockaddress.ll @@ -6,7 +6,7 @@ define void @test_blockaddress() nounwind { ; RV32I-LABEL: test_blockaddress: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 0(s0) ; RV32I-NEXT: lui 
a0, %hi(addr) ; RV32I-NEXT: addi a0, a0, %lo(addr) Index: llvm/trunk/test/CodeGen/RISCV/branch.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/branch.ll +++ llvm/trunk/test/CodeGen/RISCV/branch.ll @@ -4,7 +4,7 @@ define void @foo(i32 %a, i32 *%b, i1 %c) { ; RV32I-LABEL: foo: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: beq a3, a0, .LBB0_12 ; RV32I-NEXT: jal zero, .LBB0_1 Index: llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll +++ llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll @@ -14,7 +14,7 @@ define i16 @test_bswap_i16(i16 %a) nounwind { ; RV32I-LABEL: test_bswap_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 4080 ; RV32I-NEXT: addi a1, a1, 0 ; RV32I-NEXT: slli a2, a0, 8 @@ -29,7 +29,7 @@ define i32 @test_bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: test_bswap_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -256 ; RV32I-NEXT: srli a2, a0, 8 @@ -50,7 +50,7 @@ define i64 @test_bswap_i64(i64 %a) nounwind { ; RV32I-LABEL: test_bswap_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 16 ; RV32I-NEXT: addi a3, a2, -256 ; RV32I-NEXT: srli a2, a1, 8 @@ -81,7 +81,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind { ; RV32I-LABEL: test_cttz_i8: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 8 @@ -123,7 +123,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind { ; RV32I-LABEL: test_cttz_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 16 @@ -167,7 +167,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: test_cttz_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 32 @@ -208,7 +208,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: test_ctlz_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 32 @@ -257,7 +257,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: test_cttz_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 28(s0) ; RV32I-NEXT: sw s1, 24(s0) ; RV32I-NEXT: sw s2, 20(s0) @@ -311,7 +311,7 @@ ; RV32I-NEXT: addi a1, s3, 0 ; RV32I-NEXT: jalr ra, s6, 0 ; RV32I-NEXT: bne s2, zero, .LBB7_2 -; RV32I-NEXT: # BB#1: +; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: addi s1, a0, 32 ; RV32I-NEXT: .LBB7_2: @@ -332,7 +332,7 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { ; RV32I-LABEL: test_cttz_i8_zero_undef: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: xori a0, a0, -1 @@ -367,7 +367,7 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { ; RV32I-LABEL: test_cttz_i16_zero_undef: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: xori a0, a0, -1 @@ -402,7 +402,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ; RV32I-LABEL: test_cttz_i32_zero_undef: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: xori a0, a0, -1 @@ -437,7 +437,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind { ; RV32I-LABEL: test_cttz_i64_zero_undef: -; RV32I: # 
BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 28(s0) ; RV32I-NEXT: sw s1, 24(s0) ; RV32I-NEXT: sw s2, 20(s0) @@ -491,7 +491,7 @@ ; RV32I-NEXT: addi a1, s3, 0 ; RV32I-NEXT: jalr ra, s6, 0 ; RV32I-NEXT: bne s2, zero, .LBB11_2 -; RV32I-NEXT: # BB#1: +; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: addi s1, a0, 32 ; RV32I-NEXT: .LBB11_2: @@ -512,7 +512,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind { ; RV32I-LABEL: test_ctpop_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 Index: llvm/trunk/test/CodeGen/RISCV/calls.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/calls.ll +++ llvm/trunk/test/CodeGen/RISCV/calls.ll @@ -6,7 +6,7 @@ define i32 @test_call_external(i32 %a) nounwind { ; RV32I-LABEL: test_call_external: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(external_function) ; RV32I-NEXT: addi a1, a1, %lo(external_function) @@ -19,7 +19,7 @@ define i32 @defined_function(i32 %a) nounwind { ; RV32I-LABEL: defined_function: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, 1 @@ -28,7 +28,7 @@ define i32 @test_call_defined(i32 %a) nounwind { ; RV32I-LABEL: test_call_defined: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(defined_function) ; RV32I-NEXT: addi a1, a1, %lo(defined_function) @@ -41,7 +41,7 @@ define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind { ; RV32I-LABEL: test_call_indirect: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a2, a0, 0 ; RV32I-NEXT: addi a0, a1, 0 @@ -57,7 +57,7 @@ define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: fastcc_function: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, %b @@ -66,7 +66,7 @@ define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: test_call_fastcc: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: sw s1, 8(s0) ; RV32I-NEXT: addi s1, a0, 0 Index: llvm/trunk/test/CodeGen/RISCV/div.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/div.ll +++ llvm/trunk/test/CodeGen/RISCV/div.ll @@ -4,7 +4,7 @@ define i32 @udiv(i32 %a, i32 %b) { ; RV32I-LABEL: udiv: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__udivsi3) ; RV32I-NEXT: addi a2, a2, %lo(__udivsi3) @@ -17,7 +17,7 @@ define i32 @udiv_constant(i32 %a) { ; RV32I-LABEL: udiv_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(__udivsi3) ; RV32I-NEXT: addi a2, a1, %lo(__udivsi3) @@ -31,7 +31,7 @@ define i32 @udiv_pow2(i32 %a) { ; RV32I-LABEL: udiv_pow2: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 3 ; RV32I-NEXT: jalr zero, ra, 0 %1 = udiv i32 %a, 8 @@ -40,7 +40,7 @@ define i64 @udiv64(i64 %a, i64 %b) { ; RV32I-LABEL: udiv64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a4, %hi(__udivdi3) ; RV32I-NEXT: addi a4, a4, %lo(__udivdi3) @@ -53,7 +53,7 @@ define i64 @udiv64_constant(i64 %a) { ; RV32I-LABEL: udiv64_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__udivdi3) ; RV32I-NEXT: addi a4, a2, %lo(__udivdi3) @@ -68,7 +68,7 @@ define i32 @sdiv(i32 %a, i32 %b) { ; 
RV32I-LABEL: sdiv: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__divsi3) ; RV32I-NEXT: addi a2, a2, %lo(__divsi3) @@ -81,7 +81,7 @@ define i32 @sdiv_constant(i32 %a) { ; RV32I-LABEL: sdiv_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(__divsi3) ; RV32I-NEXT: addi a2, a1, %lo(__divsi3) @@ -95,7 +95,7 @@ define i32 @sdiv_pow2(i32 %a) { ; RV32I-LABEL: sdiv_pow2: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: srli a1, a1, 29 ; RV32I-NEXT: add a0, a0, a1 @@ -107,7 +107,7 @@ define i64 @sdiv64(i64 %a, i64 %b) { ; RV32I-LABEL: sdiv64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a4, %hi(__divdi3) ; RV32I-NEXT: addi a4, a4, %lo(__divdi3) @@ -120,7 +120,7 @@ define i64 @sdiv64_constant(i64 %a) { ; RV32I-LABEL: sdiv64_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__divdi3) ; RV32I-NEXT: addi a4, a2, %lo(__divdi3) Index: llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll +++ llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll @@ -7,7 +7,7 @@ define i32 @icmp_eq(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_eq: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sltiu a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 @@ -18,7 +18,7 @@ define i32 @icmp_ne(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ne: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sltu a0, zero, a0 ; RV32I-NEXT: jalr zero, ra, 0 @@ -29,7 +29,7 @@ define i32 @icmp_ugt(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ugt: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ugt i32 %a, %b @@ -39,7 +39,7 @@ define i32 @icmp_uge(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_uge: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 ; RV32I-NEXT: xori a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 @@ -50,7 +50,7 @@ define i32 @icmp_ult(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ult: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ult i32 %a, %b @@ -60,7 +60,7 @@ define i32 @icmp_ule(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ule: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 ; RV32I-NEXT: xori a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 @@ -71,7 +71,7 @@ define i32 @icmp_sgt(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_sgt: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a1, a0 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp sgt i32 %a, %b @@ -81,7 +81,7 @@ define i32 @icmp_sge(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_sge: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 ; RV32I-NEXT: xori a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 @@ -92,7 +92,7 @@ define i32 @icmp_slt(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_slt: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp slt i32 %a, %b @@ -102,7 +102,7 @@ define i32 @icmp_sle(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_sle: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a1, a0 ; RV32I-NEXT: xori a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 Index: llvm/trunk/test/CodeGen/RISCV/imm.ll =================================================================== --- 
llvm/trunk/test/CodeGen/RISCV/imm.ll +++ llvm/trunk/test/CodeGen/RISCV/imm.ll @@ -6,7 +6,7 @@ define i32 @zero() nounwind { ; RV32I-LABEL: zero: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, zero, 0 ; RV32I-NEXT: jalr zero, ra, 0 ret i32 0 @@ -14,7 +14,7 @@ define i32 @pos_small() nounwind { ; RV32I-LABEL: pos_small: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, zero, 2047 ; RV32I-NEXT: jalr zero, ra, 0 ret i32 2047 @@ -22,7 +22,7 @@ define i32 @neg_small() nounwind { ; RV32I-LABEL: neg_small: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, zero, -2048 ; RV32I-NEXT: jalr zero, ra, 0 ret i32 -2048 @@ -30,7 +30,7 @@ define i32 @pos_i32() nounwind { ; RV32I-LABEL: pos_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 423811 ; RV32I-NEXT: addi a0, a0, -1297 ; RV32I-NEXT: jalr zero, ra, 0 @@ -39,7 +39,7 @@ define i32 @neg_i32() nounwind { ; RV32I-LABEL: neg_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 912092 ; RV32I-NEXT: addi a0, a0, -273 ; RV32I-NEXT: jalr zero, ra, 0 Index: llvm/trunk/test/CodeGen/RISCV/indirectbr.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/indirectbr.ll +++ llvm/trunk/test/CodeGen/RISCV/indirectbr.ll @@ -4,7 +4,7 @@ define i32 @indirectbr(i8* %target) nounwind { ; RV32I-LABEL: indirectbr: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 0(s0) ; RV32I-NEXT: jalr zero, a0, 0 ; RV32I-NEXT: .LBB0_1: # %ret @@ -20,7 +20,7 @@ define i32 @indirectbr_with_offset(i8* %a) nounwind { ; RV32I-LABEL: indirectbr_with_offset: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 0(s0) ; RV32I-NEXT: jalr zero, a0, 1380 ; RV32I-NEXT: .LBB1_1: # %ret Index: llvm/trunk/test/CodeGen/RISCV/jumptable.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/jumptable.ll +++ llvm/trunk/test/CodeGen/RISCV/jumptable.ll @@ -4,7 +4,7 @@ define void @jt(i32 %in, i32* %out) { ; RV32I-LABEL: jt: -; RV32I: # BB#0: # %entry +; RV32I: # %bb.0: # %entry ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: blt a2, a0, .LBB0_3 ; RV32I-NEXT: jal zero, .LBB0_1 Index: llvm/trunk/test/CodeGen/RISCV/mem.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/mem.ll +++ llvm/trunk/test/CodeGen/RISCV/mem.ll @@ -6,7 +6,7 @@ define i32 @lb(i8 *%a) nounwind { ; RV32I-LABEL: lb: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lb a1, 0(a0) ; RV32I-NEXT: lb a0, 1(a0) ; RV32I-NEXT: jalr zero, ra, 0 @@ -20,7 +20,7 @@ define i32 @lh(i16 *%a) nounwind { ; RV32I-LABEL: lh: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lh a1, 0(a0) ; RV32I-NEXT: lh a0, 4(a0) ; RV32I-NEXT: jalr zero, ra, 0 @@ -34,7 +34,7 @@ define i32 @lw(i32 *%a) nounwind { ; RV32I-LABEL: lw: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lw a1, 0(a0) ; RV32I-NEXT: lw a0, 12(a0) ; RV32I-NEXT: jalr zero, ra, 0 @@ -46,7 +46,7 @@ define i32 @lbu(i8 *%a) nounwind { ; RV32I-LABEL: lbu: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lbu a1, 0(a0) ; RV32I-NEXT: lbu a0, 4(a0) ; RV32I-NEXT: add a0, a0, a1 @@ -62,7 +62,7 @@ define i32 @lhu(i16 *%a) nounwind { ; RV32I-LABEL: lhu: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lhu a1, 0(a0) ; RV32I-NEXT: lhu a0, 10(a0) ; RV32I-NEXT: add a0, a0, a1 @@ -80,7 +80,7 @@ define void @sb(i8 *%a, i8 %b) nounwind { ; RV32I-LABEL: sb: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sb a1, 6(a0) ; RV32I-NEXT: sb a1, 0(a0) ; RV32I-NEXT: jalr zero, ra, 0 @@ -92,7 +92,7 @@ define 
void @sh(i16 *%a, i16 %b) nounwind { ; RV32I-LABEL: sh: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sh a1, 14(a0) ; RV32I-NEXT: sh a1, 0(a0) ; RV32I-NEXT: jalr zero, ra, 0 @@ -104,7 +104,7 @@ define void @sw(i32 *%a, i32 %b) nounwind { ; RV32I-LABEL: sw: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw a1, 32(a0) ; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: jalr zero, ra, 0 @@ -117,7 +117,7 @@ ; Check load and store to an i1 location define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind { ; RV32I-LABEL: load_sext_zext_anyext_i1: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lb a1, 0(a0) ; RV32I-NEXT: lbu a1, 1(a0) ; RV32I-NEXT: lbu a0, 2(a0) @@ -139,7 +139,7 @@ define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind { ; RV32I-LABEL: load_sext_zext_anyext_i1_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lb a1, 0(a0) ; RV32I-NEXT: lbu a1, 1(a0) ; RV32I-NEXT: lbu a0, 2(a0) @@ -165,7 +165,7 @@ define i32 @lw_sw_global(i32 %a) nounwind { ; TODO: the addi should be folded in to the lw/sw operations ; RV32I-LABEL: lw_sw_global: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, %hi(G) ; RV32I-NEXT: addi a2, a1, %lo(G) ; RV32I-NEXT: lw a1, 0(a2) @@ -188,7 +188,7 @@ define i32 @lw_sw_constant(i32 %a) nounwind { ; TODO: the addi should be folded in to the lw/sw ; RV32I-LABEL: lw_sw_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 912092 ; RV32I-NEXT: addi a2, a1, -273 ; RV32I-NEXT: lw a1, 0(a2) Index: llvm/trunk/test/CodeGen/RISCV/mul.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/mul.ll +++ llvm/trunk/test/CodeGen/RISCV/mul.ll @@ -4,7 +4,7 @@ define i32 @square(i32 %a) { ; RV32I-LABEL: square: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a1, %lo(__mulsi3) @@ -18,7 +18,7 @@ define i32 @mul(i32 %a, i32 %b) { ; RV32I-LABEL: mul: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) @@ -31,7 +31,7 @@ define i32 @mul_constant(i32 %a) { ; RV32I-LABEL: mul_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a1, %lo(__mulsi3) @@ -45,7 +45,7 @@ define i32 @mul_pow2(i32 %a) { ; RV32I-LABEL: mul_pow2: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 3 ; RV32I-NEXT: jalr zero, ra, 0 %1 = mul i32 %a, 8 @@ -54,7 +54,7 @@ define i64 @mul64(i64 %a, i64 %b) { ; RV32I-LABEL: mul64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a4, %hi(__muldi3) ; RV32I-NEXT: addi a4, a4, %lo(__muldi3) @@ -67,7 +67,7 @@ define i64 @mul64_constant(i64 %a) { ; RV32I-LABEL: mul64_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__muldi3) ; RV32I-NEXT: addi a4, a2, %lo(__muldi3) Index: llvm/trunk/test/CodeGen/RISCV/rem.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/rem.ll +++ llvm/trunk/test/CodeGen/RISCV/rem.ll @@ -4,7 +4,7 @@ define i32 @urem(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: urem: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__umodsi3) ; RV32I-NEXT: addi a2, a2, %lo(__umodsi3) @@ -17,7 +17,7 @@ define i32 @srem(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: srem: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__modsi3) ; 
RV32I-NEXT: addi a2, a2, %lo(__modsi3) Index: llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll +++ llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll @@ -7,7 +7,7 @@ define i32 @rotl(i32 %x, i32 %y) { ; RV32I-LABEL: rotl: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a2, zero, 32 ; RV32I-NEXT: sub a2, a2, a1 ; RV32I-NEXT: sll a1, a0, a1 @@ -23,7 +23,7 @@ define i32 @rotr(i32 %x, i32 %y) { ; RV32I-LABEL: rotr: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a2, zero, 32 ; RV32I-NEXT: sub a2, a2, a1 ; RV32I-NEXT: srl a1, a0, a1 Index: llvm/trunk/test/CodeGen/RISCV/select-cc.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/select-cc.ll +++ llvm/trunk/test/CodeGen/RISCV/select-cc.ll @@ -4,55 +4,55 @@ define i32 @foo(i32 %a, i32 *%b) { ; RV32I-LABEL: foo: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: beq a0, a2, .LBB0_2 -; RV32I-NEXT: # BB#1: +; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bne a0, a2, .LBB0_4 -; RV32I-NEXT: # BB#3: +; RV32I-NEXT: # %bb.3: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_4: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bltu a2, a0, .LBB0_6 -; RV32I-NEXT: # BB#5: +; RV32I-NEXT: # %bb.5: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_6: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bgeu a0, a2, .LBB0_8 -; RV32I-NEXT: # BB#7: +; RV32I-NEXT: # %bb.7: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_8: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bltu a0, a2, .LBB0_10 -; RV32I-NEXT: # BB#9: +; RV32I-NEXT: # %bb.9: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_10: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bgeu a2, a0, .LBB0_12 -; RV32I-NEXT: # BB#11: +; RV32I-NEXT: # %bb.11: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_12: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: blt a2, a0, .LBB0_14 -; RV32I-NEXT: # BB#13: +; RV32I-NEXT: # %bb.13: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_14: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bge a0, a2, .LBB0_16 -; RV32I-NEXT: # BB#15: +; RV32I-NEXT: # %bb.15: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_16: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: blt a0, a2, .LBB0_18 -; RV32I-NEXT: # BB#17: +; RV32I-NEXT: # %bb.17: ; RV32I-NEXT: addi a0, a2, 0 ; RV32I-NEXT: .LBB0_18: ; RV32I-NEXT: lw a1, 0(a1) ; RV32I-NEXT: bge a1, a0, .LBB0_20 -; RV32I-NEXT: # BB#19: +; RV32I-NEXT: # %bb.19: ; RV32I-NEXT: addi a0, a1, 0 ; RV32I-NEXT: .LBB0_20: ; RV32I-NEXT: jalr zero, ra, 0 Index: llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll +++ llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll @@ -4,7 +4,7 @@ define i8 @sext_i1_to_i8(i1 %a) { ; RV32I-LABEL: sext_i1_to_i8: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: sub a0, zero, a0 ; RV32I-NEXT: jalr zero, ra, 0 @@ -14,7 +14,7 @@ define i16 @sext_i1_to_i16(i1 %a) { ; RV32I-LABEL: sext_i1_to_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: sub a0, zero, a0 ; RV32I-NEXT: jalr zero, ra, 0 @@ -24,7 +24,7 @@ define i32 @sext_i1_to_i32(i1 %a) { ; RV32I-LABEL: sext_i1_to_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: sub a0, zero, a0 ; RV32I-NEXT: jalr zero, ra, 0 @@ -34,7 +34,7 @@ define i64 @sext_i1_to_i64(i1 %a) { ; 
RV32I-LABEL: sext_i1_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: sub a0, zero, a0 ; RV32I-NEXT: addi a1, a0, 0 @@ -45,7 +45,7 @@ define i16 @sext_i8_to_i16(i8 %a) { ; RV32I-LABEL: sext_i8_to_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: jalr zero, ra, 0 @@ -55,7 +55,7 @@ define i32 @sext_i8_to_i32(i8 %a) { ; RV32I-LABEL: sext_i8_to_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: jalr zero, ra, 0 @@ -65,7 +65,7 @@ define i64 @sext_i8_to_i64(i8 %a) { ; RV32I-LABEL: sext_i8_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a0, 24 ; RV32I-NEXT: srai a0, a1, 24 ; RV32I-NEXT: srai a1, a1, 31 @@ -76,7 +76,7 @@ define i32 @sext_i16_to_i32(i16 %a) { ; RV32I-LABEL: sext_i16_to_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: jalr zero, ra, 0 @@ -86,7 +86,7 @@ define i64 @sext_i16_to_i64(i16 %a) { ; RV32I-LABEL: sext_i16_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a0, 16 ; RV32I-NEXT: srai a0, a1, 16 ; RV32I-NEXT: srai a1, a1, 31 @@ -97,7 +97,7 @@ define i64 @sext_i32_to_i64(i32 %a) { ; RV32I-LABEL: sext_i32_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: jalr zero, ra, 0 %1 = sext i32 %a to i64 @@ -106,7 +106,7 @@ define i8 @zext_i1_to_i8(i1 %a) { ; RV32I-LABEL: zext_i1_to_i8: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = zext i1 %a to i8 @@ -115,7 +115,7 @@ define i16 @zext_i1_to_i16(i1 %a) { ; RV32I-LABEL: zext_i1_to_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = zext i1 %a to i16 @@ -124,7 +124,7 @@ define i32 @zext_i1_to_i32(i1 %a) { ; RV32I-LABEL: zext_i1_to_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = zext i1 %a to i32 @@ -133,7 +133,7 @@ define i64 @zext_i1_to_i64(i1 %a) { ; RV32I-LABEL: zext_i1_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: addi a1, zero, 0 ; RV32I-NEXT: jalr zero, ra, 0 @@ -143,7 +143,7 @@ define i16 @zext_i8_to_i16(i8 %a) { ; RV32I-LABEL: zext_i8_to_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: jalr zero, ra, 0 %1 = zext i8 %a to i16 @@ -152,7 +152,7 @@ define i32 @zext_i8_to_i32(i8 %a) { ; RV32I-LABEL: zext_i8_to_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: jalr zero, ra, 0 %1 = zext i8 %a to i32 @@ -161,7 +161,7 @@ define i64 @zext_i8_to_i64(i8 %a) { ; RV32I-LABEL: zext_i8_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: addi a1, zero, 0 ; RV32I-NEXT: jalr zero, ra, 0 @@ -171,7 +171,7 @@ define i32 @zext_i16_to_i32(i16 %a) { ; RV32I-LABEL: zext_i16_to_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 @@ -182,7 +182,7 @@ define i64 @zext_i16_to_i64(i16 %a) { ; RV32I-LABEL: zext_i16_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 @@ -194,7 +194,7 @@ define i64 @zext_i32_to_i64(i32 %a) { ; RV32I-LABEL: zext_i32_to_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 0 ; RV32I-NEXT: jalr zero, ra, 0 %1 = zext i32 %a to i64 @@ -206,7 +206,7 
@@ define i1 @trunc_i8_to_i1(i8 %a) { ; RV32I-LABEL: trunc_i8_to_i1: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i8 %a to i1 ret i1 %1 @@ -214,7 +214,7 @@ define i1 @trunc_i16_to_i1(i16 %a) { ; RV32I-LABEL: trunc_i16_to_i1: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i16 %a to i1 ret i1 %1 @@ -222,7 +222,7 @@ define i1 @trunc_i32_to_i1(i32 %a) { ; RV32I-LABEL: trunc_i32_to_i1: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i32 %a to i1 ret i1 %1 @@ -230,7 +230,7 @@ define i1 @trunc_i64_to_i1(i64 %a) { ; RV32I-LABEL: trunc_i64_to_i1: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i64 %a to i1 ret i1 %1 @@ -238,7 +238,7 @@ define i8 @trunc_i16_to_i8(i16 %a) { ; RV32I-LABEL: trunc_i16_to_i8: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i16 %a to i8 ret i8 %1 @@ -246,7 +246,7 @@ define i8 @trunc_i32_to_i8(i32 %a) { ; RV32I-LABEL: trunc_i32_to_i8: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i32 %a to i8 ret i8 %1 @@ -254,7 +254,7 @@ define i8 @trunc_i64_to_i8(i64 %a) { ; RV32I-LABEL: trunc_i64_to_i8: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i64 %a to i8 ret i8 %1 @@ -262,7 +262,7 @@ define i16 @trunc_i32_to_i16(i32 %a) { ; RV32I-LABEL: trunc_i32_to_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i32 %a to i16 ret i16 %1 @@ -270,7 +270,7 @@ define i16 @trunc_i64_to_i16(i64 %a) { ; RV32I-LABEL: trunc_i64_to_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i64 %a to i16 ret i16 %1 @@ -278,7 +278,7 @@ define i32 @trunc_i64_to_i32(i64 %a) { ; RV32I-LABEL: trunc_i64_to_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: jalr zero, ra, 0 %1 = trunc i64 %a to i32 ret i32 %1 Index: llvm/trunk/test/CodeGen/RISCV/shifts.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/shifts.ll +++ llvm/trunk/test/CodeGen/RISCV/shifts.ll @@ -7,7 +7,7 @@ define i64 @lshr64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: lshr64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a3, %hi(__lshrdi3) ; RV32I-NEXT: addi a3, a3, %lo(__lshrdi3) @@ -20,7 +20,7 @@ define i64 @ashr64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: ashr64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a3, %hi(__ashrdi3) ; RV32I-NEXT: addi a3, a3, %lo(__ashrdi3) @@ -33,7 +33,7 @@ define i64 @shl64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: shl64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a3, %hi(__ashldi3) ; RV32I-NEXT: addi a3, a3, %lo(__ashldi3) Index: llvm/trunk/test/CodeGen/RISCV/wide-mem.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/wide-mem.ll +++ llvm/trunk/test/CodeGen/RISCV/wide-mem.ll @@ -6,7 +6,7 @@ define i64 @load_i64(i64 *%a) nounwind { ; RV32I-LABEL: load_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: lw a1, 4(a0) ; RV32I-NEXT: addi a0, a2, 0 @@ -21,7 +21,7 @@ ; generate two addi define i64 @load_i64_global() nounwind { ; RV32I-LABEL: load_i64_global: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, %hi(val64) ; RV32I-NEXT: addi a0, a0, %lo(val64) ; RV32I-NEXT: lw a0, 0(a0) Index: llvm/trunk/test/CodeGen/SPARC/analyze-branch.ll 
=================================================================== --- llvm/trunk/test/CodeGen/SPARC/analyze-branch.ll +++ llvm/trunk/test/CodeGen/SPARC/analyze-branch.ll @@ -18,7 +18,7 @@ ; CHECK: cmp {{%[goli][0-9]+}}, 42 ; CHECK: bne [[FALSE:.LBB[0-9]+_[0-9]+]] ; CHECK-NEXT: nop -; CHECK-NEXT: ! BB# +; CHECK-NEXT: ! %bb. ; CHECK-NEXT: call test_true ; CHECK: [[FALSE]]: @@ -42,7 +42,7 @@ ; CHECK: be [[TRUE:.LBB[0-9]+_[0-9]+]] ; CHECK-NEXT: nop -; CHECK-NEXT: ! BB# +; CHECK-NEXT: ! %bb. ; CHECK-NEXT: call test_false ; CHECK: [[TRUE]]: Index: llvm/trunk/test/CodeGen/SPARC/vector-extract-elt.ll =================================================================== --- llvm/trunk/test/CodeGen/SPARC/vector-extract-elt.ll +++ llvm/trunk/test/CodeGen/SPARC/vector-extract-elt.ll @@ -5,7 +5,7 @@ ; look-thru for extractelement then we we know that the add will yield a ; non-negative result. define i1 @test1(<4 x i16>* %in) { -; CHECK-LABEL: ! BB#0: +; CHECK-LABEL: ! %bb.0: ; CHECK-NEXT: retl ; CHECK-NEXT: sethi 0, %o0 %vec2 = load <4 x i16>, <4 x i16>* %in, align 1 Index: llvm/trunk/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll =================================================================== --- llvm/trunk/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll +++ llvm/trunk/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll @@ -9,7 +9,7 @@ ; store i1 true, i1* %g_717.sink.i, align 4 ; %.b = load i1, i1* @g_2, align 4 -; CHECK: # BB#6: # %crc32_gentab.exit +; CHECK: # %bb.6: # %crc32_gentab.exit ; CHECK: larl %r2, g_2 ; CHECK-NEXT: llc %r3, 0(%r2) ; CHECK-NOT: %r2 Index: llvm/trunk/test/CodeGen/SystemZ/dag-combine-02.ll =================================================================== --- llvm/trunk/test/CodeGen/SystemZ/dag-combine-02.ll +++ llvm/trunk/test/CodeGen/SystemZ/dag-combine-02.ll @@ -93,7 +93,7 @@ br i1 %60, label %61, label %13 ;