Index: lib/Target/ARM/ARM.td
===================================================================
--- lib/Target/ARM/ARM.td
+++ lib/Target/ARM/ARM.td
@@ -312,6 +312,9 @@
                                      "equivalent when the immediate does "
                                      "not fit in the encoding.">;
 
+// Use the MachineScheduler for instruction scheduling for the subtarget.
+def FeatureUseMISched: SubtargetFeature<"use-misched", "UseMISched", "true",
+                                        "Use the MachineScheduler">;
 //===----------------------------------------------------------------------===//
 // ARM architecture class
 //===----------------------------------------------------------------------===//
@@ -791,7 +794,8 @@
                                            FeatureSlowOddRegister,
                                            FeatureSlowLoadDSubreg,
                                            FeatureSlowVGETLNi32,
-                                           FeatureSlowVDUP32]>;
+                                           FeatureSlowVDUP32,
+                                           FeatureUseMISched]>;
 
 def : ProcessorModel<"cortex-r4", CortexA8Model, [ARMv7r, ProcR4,
                                                   FeatureHasRetAddrStack,
@@ -915,6 +919,7 @@
                                                 FeatureAvoidMOVsShOp,
                                                 FeatureHasSlowFPVMLx,
                                                 FeatureCrypto,
+                                                FeatureUseMISched,
                                                 FeatureZCZeroing]>;
 
 def : ProcNoItin<"exynos-m1",                   [ARMv8a, ProcExynosM1,
Index: lib/Target/ARM/ARMSubtarget.h
===================================================================
--- lib/Target/ARM/ARMSubtarget.h
+++ lib/Target/ARM/ARMSubtarget.h
@@ -180,6 +180,9 @@
   /// UseSoftFloat - True if we're using software floating point features.
   bool UseSoftFloat = false;
 
+  /// UseMISched - True if MachineScheduler should be used for this subtarget.
+  bool UseMISched = false;
+
   /// HasThumb2 - True if Thumb2 instructions are supported.
   bool HasThumb2 = false;
 
@@ -645,6 +648,7 @@
   bool isROPI() const;
   bool isRWPI() const;
 
+  bool useMachineScheduler() const { return UseMISched; }
   bool useSoftFloat() const { return UseSoftFloat; }
   bool isThumb() const { return InThumbMode; }
   bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
Index: lib/Target/ARM/ARMSubtarget.cpp
===================================================================
--- lib/Target/ARM/ARMSubtarget.cpp
+++ lib/Target/ARM/ARMSubtarget.cpp
@@ -396,17 +396,16 @@
 }
 
 bool ARMSubtarget::enableMachineScheduler() const {
-  // Enable the MachineScheduler before register allocation for out-of-order
-  // architectures where we do not use the PostRA scheduler anymore (for now
-  // restricted to swift).
-  return getSchedModel().isOutOfOrder() && isSwift();
+  // Enable the MachineScheduler before register allocation for subtargets
+  // with the use-misched feature.
+  return useMachineScheduler();
 }
 
 // This overrides the PostRAScheduler bit in the SchedModel for any CPU.
 bool ARMSubtarget::enablePostRAScheduler() const {
-  // No need for PostRA scheduling on out of order CPUs (for now restricted to
-  // swift).
-  if (getSchedModel().isOutOfOrder() && isSwift())
+  // No need for PostRA scheduling on subtargets where we use the
+  // MachineScheduler.
+  if (useMachineScheduler())
     return false;
   return (!isThumb() || hasThumb2());
 }
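
Note: with this change, any other ARM subtarget can opt in to the pre-RA MachineScheduler (and, via enablePostRAScheduler() above, out of PostRA scheduling) simply by listing the new feature in its processor definition. A minimal sketch, using a hypothetical "my-cpu" entry in ARM.td that is not part of this patch:

// Hypothetical processor definition: opts in to the pre-RA MachineScheduler
// by including FeatureUseMISched in its feature list.
def : ProcessorModel<"my-cpu", CortexA9Model, [ARMv7a,
                                               FeatureUseMISched]>;

The behavior can also be toggled from the command line through the subtarget feature string, e.g. llc -mtriple=armv7-- -mattr=+use-misched, which should make it easy to compare schedules with and without the MachineScheduler on any CPU.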