Index: lib/Target/ARM/ARM.td
===================================================================
--- lib/Target/ARM/ARM.td
+++ lib/Target/ARM/ARM.td
@@ -132,6 +132,11 @@
 def FeaturePrefISHSTBarrier : SubtargetFeature<"prefer-ishst", "PreferISHST",
                                                "true", "Prefer ISHST barriers">;
 
+// Some targets (e.g. Cortex-A15) never want VMOVS to be widened to VMOVD.
+def FeatureDontWidenVMOVS : SubtargetFeature<"dont-widen-vmovs",
+                                             "DontWidenVMOVS", "true",
+                                             "Don't widen VMOVS to VMOVD">;
+
 // Some targets (e.g. Cortex-A9) want to convert VMOVRS, VMOVSR and VMOVS from
 // VFP to NEON, as an execution domain optimization.
 def FeatureNEONForFPMovs : SubtargetFeature<"neon-fpmovs", "UseNEONForFPMovs",
@@ -597,6 +602,7 @@
 
 // FIXME: A15 has currently the same Schedule model as A9.
 def : ProcessorModel<"cortex-a15", CortexA9Model, [ARMv7a, ProcA15,
+                                                   FeatureDontWidenVMOVS,
                                                    FeatureHasRetAddrStack,
                                                    FeatureTrustZone,
                                                    FeatureT2XtPk,
Index: lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -51,15 +51,6 @@
 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
                cl::desc("Enable ARM 2-addr to 3-addr conv"));
 
-static cl::opt<bool>
-WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
-           cl::desc("Widen ARM vmovs to vmovd when possible"));
-
-static cl::opt<unsigned>
-SwiftPartialUpdateClearance("swift-partial-update-clearance",
-                            cl::Hidden, cl::init(12),
-                            cl::desc("Clearance before partial register updates"));
-
 /// ARM_MLxEntry - Record information about MLA / MLS instructions.
 struct ARM_MLxEntry {
   uint16_t MLxOpc;     // MLA / MLS opcode
@@ -1296,8 +1287,7 @@
   // copyPhysReg() calls. Look for VMOVS instructions that can legally be
   // widened to VMOVD. We prefer the VMOVD when possible because it may be
   // changed into a VORR that can go down the NEON pipeline.
-  if (!WidenVMOVS || !MI->isCopy() || Subtarget.isCortexA15() ||
-      Subtarget.isFPOnlySP())
+  if (!MI->isCopy() || Subtarget.dontWidenVMOVS() || Subtarget.isFPOnlySP())
     return false;
 
   // Look for a copy between even S-registers. That is where we keep floats
@@ -4478,8 +4468,8 @@
 getPartialRegUpdateClearance(const MachineInstr *MI,
                              unsigned OpNum,
                              const TargetRegisterInfo *TRI) const {
-  if (!SwiftPartialUpdateClearance ||
-      !(Subtarget.isSwift() || Subtarget.isCortexA15()))
+  auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
+  if (!PartialUpdateClearance)
     return 0;
 
   assert(TRI && "Need TRI instance");
@@ -4531,7 +4521,7 @@
 
   // MI has an unwanted D-register dependency.
   // Avoid defs in the previous N instructrions.
-  return SwiftPartialUpdateClearance;
+  return PartialUpdateClearance;
 }
 
 // Break a partial register dependency after getPartialRegUpdateClearance
Index: lib/Target/ARM/ARMSubtarget.h
===================================================================
--- lib/Target/ARM/ARMSubtarget.h
+++ lib/Target/ARM/ARMSubtarget.h
@@ -249,6 +249,10 @@
   /// If true, ISHST barriers will be used for Release semantics.
   bool PreferISHST = false;
 
+  /// If true, VMOVS will never be widened to VMOVD. Set for targets (such as
+  /// Cortex-A15) that never want the wider copy.
+  bool DontWidenVMOVS = false;
+
   /// If true, VMOVRS, VMOVSR and VMOVS will be converted from VFP to NEON.
   bool UseNEONForFPMovs = false;
 
@@ -292,6 +296,9 @@
 
   unsigned MaxInterleaveFactor = 1;
 
+  /// Clearance before partial register updates (in number of instructions)
+  unsigned PartialUpdateClearance = 0;
+
   /// What kind of timing do load multiple/store multiple have (double issue,
   /// single issue etc).
   ARMLdStMultipleTiming LdStMultipleTiming = SingleIssue;
@@ -431,6 +438,7 @@
   bool hasSlowVDUP32() const { return HasSlowVDUP32; }
   bool preferVMOVSR() const { return PreferVMOVSR; }
   bool preferISHSTBarriers() const { return PreferISHST; }
+  bool dontWidenVMOVS() const { return DontWidenVMOVS; }
   bool useNEONForFPMovs() const { return UseNEONForFPMovs; }
   bool checkVLDnAccessAlignment() const { return CheckVLDnAlign; }
   bool nonpipelinedVFP() const { return NonpipelinedVFP; }
@@ -574,6 +582,8 @@
 
   unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
 
+  unsigned getPartialUpdateClearance() const { return PartialUpdateClearance; }
+
   ARMLdStMultipleTiming getLdStMultipleTiming() const {
     return LdStMultipleTiming;
   }
Index: lib/Target/ARM/ARMSubtarget.cpp
===================================================================
--- lib/Target/ARM/ARMSubtarget.cpp
+++ lib/Target/ARM/ARMSubtarget.cpp
@@ -220,6 +220,7 @@
   case CortexA15:
     MaxInterleaveFactor = 2;
     PreISelOperandLatencyAdjustment = 1;
+    PartialUpdateClearance = 12;
     break;
   case CortexA17:
   case CortexA32:
@@ -242,6 +243,7 @@
     MaxInterleaveFactor = 2;
     LdStMultipleTiming = SingleIssuePlusExtras;
     PreISelOperandLatencyAdjustment = 1;
+    PartialUpdateClearance = 12;
     break;
   }
 }
Index: test/CodeGen/ARM/widen-vmovs.ll
===================================================================
--- test/CodeGen/ARM/widen-vmovs.ll
+++ test/CodeGen/ARM/widen-vmovs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -widen-vmovs -mcpu=cortex-a8 -verify-machineinstrs -disable-block-placement | FileCheck %s
+; RUN: llc < %s -mcpu=cortex-a8 -verify-machineinstrs -disable-block-placement | FileCheck %s
 target triple = "thumbv7-apple-ios"
 
 ; The 1.0e+10 constant is loaded from the constant pool and kept in a register.
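
A minimal usage sketch, not part of the patch: with the cl::opt flags removed, the behaviour is driven entirely by subtarget features, so the -mattr spelling below follows directly from the "dont-widen-vmovs" SubtargetFeature string added in ARM.td; the input file name is only a placeholder.

  llc -mtriple=thumbv7-apple-ios -mcpu=cortex-a15 input.ll -o -
  # VMOVS widening disabled by FeatureDontWidenVMOVS in the cortex-a15 definition

  llc -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -mattr=+dont-widen-vmovs input.ll -o -
  # the same effect forced on a CPU that would otherwise widen VMOVS to VMOVD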