Index: lib/Bitcode/Writer/BitcodeWriter.cpp
===================================================================
--- lib/Bitcode/Writer/BitcodeWriter.cpp
+++ lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -2507,9 +2507,9 @@
   // For the module-level VST, add abbrev Ids for the VST_CODE_FNENTRY
   // records, which are not used in the per-function VSTs.
-  unsigned FnEntry8BitAbbrev;
-  unsigned FnEntry7BitAbbrev;
-  unsigned FnEntry6BitAbbrev;
+  unsigned FnEntry8BitAbbrev = VST_ENTRY_8_ABBREV;
+  unsigned FnEntry7BitAbbrev = VST_ENTRY_7_ABBREV;
+  unsigned FnEntry6BitAbbrev = VST_ENTRY_6_ABBREV;
   if (IsModuleLevel && hasVSTOffsetPlaceholder()) {
     // 8-bit fixed-width VST_CODE_FNENTRY function strings.
     BitCodeAbbrev *Abbv = new BitCodeAbbrev();
Index: lib/CodeGen/ScheduleDAGInstrs.cpp
===================================================================
--- lib/CodeGen/ScheduleDAGInstrs.cpp
+++ lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -914,7 +914,8 @@
   for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
        MII != MIE; --MII) {
     MachineInstr *MI = std::prev(MII);
-    if (MI && DbgMI) {
+
+    if (DbgMI) {
       DbgValues.push_back(std::make_pair(DbgMI, MI));
       DbgMI = nullptr;
     }
Index: lib/Target/ARM/ARMFrameLowering.cpp
===================================================================
--- lib/Target/ARM/ARMFrameLowering.cpp
+++ lib/Target/ARM/ARMFrameLowering.cpp
@@ -1607,9 +1607,9 @@
   // FIXME: We could add logic to be more precise about negative offsets
   // and which instructions will need a scratch register for them. Is it
   // worth the effort and added fragility?
-  bool BigStack = (RS && (MFI->estimateStackSize(MF) +
-                          ((hasFP(MF) && AFI->hasStackFrame()) ? 4 : 0) >=
-                              estimateRSStackSizeLimit(MF, this))) ||
+  bool BigStack = ((MFI->estimateStackSize(MF) +
+                    ((hasFP(MF) && AFI->hasStackFrame()) ? 4 : 0) >=
+                        estimateRSStackSizeLimit(MF, this))) ||
                   MFI->hasVarSizedObjects() ||
                   (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
Index: lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
===================================================================
--- lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -406,6 +406,7 @@
     report_fatal_error("FIXME: relocations to absolute targets "
                        "not yet implemented");
   } else {
+    assert(A && "symbol is needed to resolve constant variables");
     // Resolve constant variables.
     if (A->isVariable()) {
       int64_t Res;
Index: lib/Target/ARM/Thumb2SizeReduction.cpp
===================================================================
--- lib/Target/ARM/Thumb2SizeReduction.cpp
+++ lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -989,6 +989,7 @@
     }
 
     if (!NextInSameBundle && MI->isInsideBundle()) {
+      assert(BundleMI && "MachineInstr is null, abort reducing width of instructions");
       // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
       // marker is only on the BUNDLE instruction. Process the BUNDLE
       // instruction as we finish with the bundled instruction to work around
Index: lib/Target/Hexagon/HexagonCFGOptimizer.cpp
===================================================================
--- lib/Target/Hexagon/HexagonCFGOptimizer.cpp
+++ lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -180,6 +180,7 @@
         // Ensure that BB2 has one instruction -- an unconditional jump.
         if ((LayoutSucc->size() == 1) &&
             IsUnconditionalJump(LayoutSucc->front().getOpcode())) {
+          assert(JumpAroundTarget && "jump target is needed to process second basic block");
           MachineBasicBlock* UncondTarget =
             LayoutSucc->front().getOperand(0).getMBB();
           // Check if the layout successor of BB2 is BB3.
Index: lib/Target/Hexagon/RDFGraph.cpp
===================================================================
--- lib/Target/Hexagon/RDFGraph.cpp
+++ lib/Target/Hexagon/RDFGraph.cpp
@@ -1564,6 +1564,7 @@
   // Push block delimiters.
   markBlock(BA.Id, DefM);
 
+  assert(BA.Addr && "block node address is needed to create a data-flow link");
   // For each non-phi instruction in the block, link all the defs and uses
   // to their reaching defs. For any member of the block (including phis),
   // push the defs on the corresponding stacks.
Index: lib/Target/X86/AsmParser/X86AsmParser.cpp
===================================================================
--- lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -3002,6 +3002,7 @@
     getStreamer().InitSections(false);
     Section = getStreamer().getCurrentSection().first;
   }
+  assert(Section && "must have section to emit alignment");
   if (Section->UseCodeAlign())
     getStreamer().EmitCodeAlignment(2, 0);
   else
Index: lib/Transforms/Scalar/ScalarReplAggregates.cpp
===================================================================
--- lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -1986,6 +1986,7 @@
 /// and recursively continue updating all of its uses.
 void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                           SmallVectorImpl<AllocaInst *> &NewElts) {
+  assert(BC && AI && "BitCast and Alloca cannot be null");
   RewriteForScalarRepl(BC, AI, Offset, NewElts);
   if (BC->getOperand(0) != AI)
     return;
Index: lib/Transforms/Scalar/StructurizeCFG.cpp
===================================================================
--- lib/Transforms/Scalar/StructurizeCFG.cpp
+++ lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -719,6 +719,8 @@
 /// \brief Create a new or reuse the previous node as flow node
 BasicBlock *StructurizeCFG::needPrefix(bool NeedEmpty) {
+  assert(PrevNode && "cannot reuse null PrevNode as flow node");
+
   BasicBlock *Entry = PrevNode->getEntry();
 
   if (!PrevNode->isSubRegion()) {
Index: lib/Transforms/Scalar/TailRecursionElimination.cpp
===================================================================
--- lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -590,6 +590,7 @@
   if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
     return nullptr;
 
+  assert(F && "BasicBlock parent did not have an enclosing method");
   // As a special case, detect code like this:
   //   double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
   // and disable this xform in this case, because the code generator will