Index: llvm/trunk/include/llvm/Target/TargetSchedule.td =================================================================== --- llvm/trunk/include/llvm/Target/TargetSchedule.td +++ llvm/trunk/include/llvm/Target/TargetSchedule.td @@ -104,6 +104,7 @@ def NoSchedModel : SchedMachineModel { let NoModel = 1; + let CompleteModel = 0; } // Define a kind of processor resource that may be common across Index: llvm/trunk/lib/Target/AArch64/AArch64SchedA53.td =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64SchedA53.td +++ llvm/trunk/lib/Target/AArch64/AArch64SchedA53.td @@ -26,6 +26,7 @@ let MispredictPenalty = 9; // Based on "Cortex-A53 Software Optimisation // Specification - Instruction Timings" // v 1.0 Spreadsheet + let CompleteModel = 0; } Index: llvm/trunk/lib/Target/AArch64/AArch64SchedA57.td =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64SchedA57.td +++ llvm/trunk/lib/Target/AArch64/AArch64SchedA57.td @@ -30,6 +30,7 @@ // Enable partial & runtime unrolling. The magic number is chosen based on // experiments and benchmarking data. let LoopMicroOpBufferSize = 16; + let CompleteModel = 0; } //===----------------------------------------------------------------------===// Index: llvm/trunk/lib/Target/AArch64/AArch64SchedCyclone.td =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64SchedCyclone.td +++ llvm/trunk/lib/Target/AArch64/AArch64SchedCyclone.td @@ -17,6 +17,7 @@ let MicroOpBufferSize = 192; // Based on the reorder buffer. let LoadLatency = 4; // Optimistic load latency. let MispredictPenalty = 16; // 14-19 cycles are typical. 
+ let CompleteModel = 0; } //===----------------------------------------------------------------------===// Index: llvm/trunk/lib/Target/AArch64/AArch64SchedKryo.td =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64SchedKryo.td +++ llvm/trunk/lib/Target/AArch64/AArch64SchedKryo.td @@ -26,6 +26,7 @@ // Enable partial & runtime unrolling. The magic number is chosen based on // experiments and benchmarking data. let LoopMicroOpBufferSize = 16; + let CompleteModel = 0; } //===----------------------------------------------------------------------===// Index: llvm/trunk/lib/Target/AMDGPU/SISchedule.td =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SISchedule.td +++ llvm/trunk/lib/Target/AMDGPU/SISchedule.td @@ -39,8 +39,12 @@ // instructions and have VALU rates, but write to the SALU (i.e. VOPC // instructions) -def SIFullSpeedModel : SchedMachineModel; -def SIQuarterSpeedModel : SchedMachineModel; +def SIFullSpeedModel : SchedMachineModel { + let CompleteModel = 0; +} +def SIQuarterSpeedModel : SchedMachineModel { + let CompleteModel = 0; +} // BufferSize = 0 means the processors are in-order. let BufferSize = 0 in { Index: llvm/trunk/lib/Target/ARM/ARMScheduleA8.td =================================================================== --- llvm/trunk/lib/Target/ARM/ARMScheduleA8.td +++ llvm/trunk/lib/Target/ARM/ARMScheduleA8.td @@ -1070,6 +1070,7 @@ // This is overriden by OperandCycles if the // Itineraries are queried instead. let MispredictPenalty = 13; // Based on estimate of pipeline depth. 
+ let CompleteModel = 0; let Itineraries = CortexA8Itineraries; } Index: llvm/trunk/lib/Target/Hexagon/HexagonScheduleV4.td =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonScheduleV4.td +++ llvm/trunk/lib/Target/Hexagon/HexagonScheduleV4.td @@ -199,6 +199,7 @@ let IssueWidth = 4; let Itineraries = HexagonItinerariesV4; let LoadLatency = 1; + let CompleteModel = 0; } //===----------------------------------------------------------------------===// Index: llvm/trunk/lib/Target/Hexagon/HexagonScheduleV55.td =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonScheduleV55.td +++ llvm/trunk/lib/Target/Hexagon/HexagonScheduleV55.td @@ -163,6 +163,7 @@ let IssueWidth = 4; let Itineraries = HexagonItinerariesV55; let LoadLatency = 1; + let CompleteModel = 0; } //===----------------------------------------------------------------------===// Index: llvm/trunk/lib/Target/Hexagon/HexagonScheduleV60.td =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonScheduleV60.td +++ llvm/trunk/lib/Target/Hexagon/HexagonScheduleV60.td @@ -303,6 +303,7 @@ let IssueWidth = 4; let Itineraries = HexagonItinerariesV60; let LoadLatency = 1; + let CompleteModel = 0; } //===----------------------------------------------------------------------===// Index: llvm/trunk/lib/Target/Mips/MipsScheduleP5600.td =================================================================== --- llvm/trunk/lib/Target/Mips/MipsScheduleP5600.td +++ llvm/trunk/lib/Target/Mips/MipsScheduleP5600.td @@ -13,7 +13,7 @@ int LoadLatency = 4; int MispredictPenalty = 8; // TODO: Estimated - let CompleteModel = 1; + let CompleteModel = 0; } let SchedModel = MipsP5600Model in { Index: llvm/trunk/lib/Target/PowerPC/PPCSchedule440.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCSchedule440.td +++ 
llvm/trunk/lib/Target/PowerPC/PPCSchedule440.td @@ -602,6 +602,8 @@ // This is overriden by OperandCycles if the // Itineraries are queried instead. + let CompleteModel = 0; + let Itineraries = PPC440Itineraries; } Index: llvm/trunk/lib/Target/PowerPC/PPCScheduleA2.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCScheduleA2.td +++ llvm/trunk/lib/Target/PowerPC/PPCScheduleA2.td @@ -166,6 +166,8 @@ // Itineraries are queried instead. let MispredictPenalty = 13; + let CompleteModel = 0; + let Itineraries = PPCA2Itineraries; } Index: llvm/trunk/lib/Target/PowerPC/PPCScheduleE500mc.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCScheduleE500mc.td +++ llvm/trunk/lib/Target/PowerPC/PPCScheduleE500mc.td @@ -316,5 +316,7 @@ // This is overriden by OperandCycles if the // Itineraries are queried instead. + let CompleteModel = 0; + let Itineraries = PPCE500mcItineraries; } Index: llvm/trunk/lib/Target/PowerPC/PPCScheduleE5500.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCScheduleE5500.td +++ llvm/trunk/lib/Target/PowerPC/PPCScheduleE5500.td @@ -376,5 +376,7 @@ // This is overriden by OperandCycles if the // Itineraries are queried instead. + let CompleteModel = 0; + let Itineraries = PPCE5500Itineraries; } Index: llvm/trunk/lib/Target/PowerPC/PPCScheduleG5.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCScheduleG5.td +++ llvm/trunk/lib/Target/PowerPC/PPCScheduleG5.td @@ -124,6 +124,8 @@ // Itineraries are queried instead. 
let MispredictPenalty = 16; + let CompleteModel = 0; + let Itineraries = G5Itineraries; } Index: llvm/trunk/lib/Target/PowerPC/PPCScheduleP7.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCScheduleP7.td +++ llvm/trunk/lib/Target/PowerPC/PPCScheduleP7.td @@ -391,6 +391,8 @@ // Try to make sure we have at least 10 dispatch groups in a loop. let LoopMicroOpBufferSize = 40; + let CompleteModel = 0; + let Itineraries = P7Itineraries; } Index: llvm/trunk/lib/Target/PowerPC/PPCScheduleP8.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCScheduleP8.td +++ llvm/trunk/lib/Target/PowerPC/PPCScheduleP8.td @@ -400,6 +400,8 @@ // Try to make sure we have at least 10 dispatch groups in a loop. let LoopMicroOpBufferSize = 60; + let CompleteModel = 0; + let Itineraries = P8Itineraries; } Index: llvm/trunk/lib/Target/X86/X86Schedule.td =================================================================== --- llvm/trunk/lib/Target/X86/X86Schedule.td +++ llvm/trunk/lib/Target/X86/X86Schedule.td @@ -640,6 +640,7 @@ let LoadLatency = 4; let HighLatency = 10; let PostRAScheduler = 0; + let CompleteModel = 0; } include "X86ScheduleAtom.td" Index: llvm/trunk/lib/Target/X86/X86ScheduleAtom.td =================================================================== --- llvm/trunk/lib/Target/X86/X86ScheduleAtom.td +++ llvm/trunk/lib/Target/X86/X86ScheduleAtom.td @@ -544,6 +544,7 @@ // simple loops, expand by a small factor to hide the backedge cost. 
let LoopMicroOpBufferSize = 10; let PostRAScheduler = 1; + let CompleteModel = 0; let Itineraries = AtomItineraries; } Index: llvm/trunk/utils/TableGen/CodeGenSchedule.h =================================================================== --- llvm/trunk/utils/TableGen/CodeGenSchedule.h +++ llvm/trunk/utils/TableGen/CodeGenSchedule.h @@ -401,6 +401,8 @@ void inferSchedClasses(); + void checkCompleteness(); + void inferFromRW(ArrayRef OperWrites, ArrayRef OperReads, unsigned FromClassIdx, ArrayRef ProcIndices); void inferFromItinClass(Record *ItinClassDef, unsigned FromClassIdx); Index: llvm/trunk/utils/TableGen/CodeGenSchedule.cpp =================================================================== --- llvm/trunk/utils/TableGen/CodeGenSchedule.cpp +++ llvm/trunk/utils/TableGen/CodeGenSchedule.cpp @@ -126,6 +126,8 @@ // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and // ProcResourceDefs. collectProcResources(); + + checkCompleteness(); } /// Gather all processor models. @@ -1523,6 +1525,49 @@ } } +void CodeGenSchedModels::checkCompleteness() { + bool Complete = true; + bool HadCompleteModel = false; + for (const CodeGenProcModel &ProcModel : procModels()) { + // Only consider processor models that explicitly opt in to completeness + // checking by setting the "CompleteModel" bit; models without it are + // exempt from the per-instruction scheduling-info check. 
+ if (!ProcModel.ModelDef->getValueAsBit("CompleteModel")) + continue; + for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { + if (Inst->hasNoSchedulingInfo) + continue; + unsigned SCIdx = getSchedClassIdx(*Inst); + if (!SCIdx) { + if (Inst->TheDef->isValueUnset("SchedRW") && !HadCompleteModel) { + PrintError("No schedule information for instruction '" + + Inst->TheDef->getName() + "'"); + Complete = false; + } + continue; + } + + const CodeGenSchedClass &SC = getSchedClass(SCIdx); + if (!SC.Writes.empty()) + continue; + + const RecVec &InstRWs = SC.InstRWs; + auto I = std::find_if(InstRWs.begin(), InstRWs.end(), + [&ProcModel] (const Record *R) { + return R->getValueAsDef("SchedModel") == ProcModel.ModelDef; + }); + if (I == InstRWs.end()) { + PrintError("'" + ProcModel.ModelName + "' lacks information for '" + + Inst->TheDef->getName() + "'"); + Complete = false; + } + } + HadCompleteModel = true; + } + if (!Complete) + PrintFatalError("Incomplete schedule model"); +} + // Collect itinerary class resources for each processor. void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) { for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {