Index: include/llvm/CodeGen/MachineCombinerPattern.h
===================================================================
--- include/llvm/CodeGen/MachineCombinerPattern.h
+++ include/llvm/CodeGen/MachineCombinerPattern.h
@@ -71,7 +71,10 @@
   FMLSv2f32_OP2,
   FMLSv2f64_OP2,
   FMLSv4i32_indexed_OP2,
-  FMLSv4f32_OP2
+  FMLSv4f32_OP2,
+
+  // The FDIV -> reciprocal-estimate pattern matched by the X86 machine
+  // combiner.
+  Div2RecipEst
 };
 
 } // end namespace llvm
Index: include/llvm/CodeGen/MachineInstr.h
===================================================================
--- include/llvm/CodeGen/MachineInstr.h
+++ include/llvm/CodeGen/MachineInstr.h
@@ -1148,10 +1148,14 @@
 
   //
   // Debugging support
-  //
-  void print(raw_ostream &OS, bool SkipOpers = false) const;
-  void print(raw_ostream &OS, ModuleSlotTracker &MST,
-             bool SkipOpers = false) const;
+  // Sometimes (e.g. in the Machine Combiner) we build sequences of
+  // instructions that are not yet inserted into any basic block. Such
+  // instructions cannot be pretty-printed on their own because the printer
+  // needs the parent block to reach things like the TargetInstrInfo, so the
+  // optional \p P argument below supplies that parent.
+  void print(raw_ostream &OS, bool SkipOpers = false,
+             const MachineBasicBlock *P = nullptr) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST, bool SkipOpers = false,
+             const MachineBasicBlock *P = nullptr) const;
   void dump() const;
 
   //===--------------------------------------------------------------------===//
Index: lib/CodeGen/MachineCombiner.cpp
===================================================================
--- lib/CodeGen/MachineCombiner.cpp
+++ lib/CodeGen/MachineCombiner.cpp
@@ -152,9 +152,13 @@
         assert(DefInstr &&
                "There must be a definition for a new virtual register");
         DepthOp = InstrDepth[II->second];
-        LatencyOp = TSchedModel.computeOperandLatency(
-            DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
-            InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
+        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
+        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
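+        // The def/use operand indices may not be found here, e.g. when the
+        // target maps several new virtual registers to the same instruction
+        // index; fall back to the default latency in that case.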
+        if (DefIdx < 0 || UseIdx < 0)
+          LatencyOp = TII->defaultDefLatency(SchedModel, *DefInstr);
+        else
+          LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
+                                                        InstrPtr, UseIdx);
       } else {
         MachineInstr *DefInstr = getOperandDef(MO);
         if (DefInstr) {
Index: lib/CodeGen/MachineInstr.cpp
===================================================================
--- lib/CodeGen/MachineInstr.cpp
+++ lib/CodeGen/MachineInstr.cpp
@@ -1690,26 +1690,28 @@
 #endif
 }
 
-void MachineInstr::print(raw_ostream &OS, bool SkipOpers) const {
+void MachineInstr::print(raw_ostream &OS, bool SkipOpers,
+                         const MachineBasicBlock *P) const {
   const Module *M = nullptr;
   if (const MachineBasicBlock *MBB = getParent())
     if (const MachineFunction *MF = MBB->getParent())
       M = MF->getFunction()->getParent();
 
   ModuleSlotTracker MST(M);
-  print(OS, MST, SkipOpers);
+  print(OS, MST, SkipOpers, P);
 }
 
 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
-                         bool SkipOpers) const {
+                         bool SkipOpers, const MachineBasicBlock *P) const {
   // We can be a bit tidier if we know the MachineFunction.
   const MachineFunction *MF = nullptr;
   const TargetRegisterInfo *TRI = nullptr;
   const MachineRegisterInfo *MRI = nullptr;
   const TargetInstrInfo *TII = nullptr;
   const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
+  const MachineBasicBlock *MBB = P ? P : getParent();
 
-  if (const MachineBasicBlock *MBB = getParent()) {
+  if (MBB) {
     MF = MBB->getParent();
     if (MF) {
       MRI = &MF->getRegInfo();
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14962,6 +14962,18 @@
 ///   X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
 ///     does not require additional intermediate precision]
 SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags) {
+
+  // This transformation is better done at the Machine Combiner level, where
+  // the real latencies of FDIV and of its reciprocal expansion are known.
+  switch (DAG.getTarget().getTargetTriple().getArch()) {
+  default:
+    break;
+  // These architectures handle the reciprocal estimate at the Machine
+  // Combiner level instead.
+  case Triple::x86:
+  case Triple::x86_64:
+    return SDValue();
+  }
+
   if (Level >= AfterLegalizeDAG)
     return SDValue();
 
Index: lib/Target/X86/X86InstrInfo.h
===================================================================
--- lib/Target/X86/X86InstrInfo.h
+++ lib/Target/X86/X86InstrInfo.h
@@ -504,6 +504,21 @@
     return true;
   }
 
+  /// When getMachineCombinerPatterns() finds patterns, this function generates
+  /// the instructions that could replace the original code sequence.
+  void genAlternativeCodeSequence(
+      MachineInstr &Root, MachineCombinerPattern Pattern,
+      SmallVectorImpl<MachineInstr *> &InsInstrs,
+      SmallVectorImpl<MachineInstr *> &DelInstrs,
+      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
+
+  /// Return true when there is potentially a faster code sequence
+  /// for an instruction chain ending in \p Root. All potential patterns are
+  /// listed in the \p Patterns array.
+  bool getMachineCombinerPatterns(
+      MachineInstr &Root,
+      SmallVectorImpl<MachineCombinerPattern> &Patterns) const override;
+
   bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
 
   bool hasReassociableOperands(const MachineInstr &Inst,
Index: lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- lib/Target/X86/X86InstrInfo.cpp
+++ lib/Target/X86/X86InstrInfo.cpp
@@ -9312,6 +9312,354 @@
   }
 }
 
+static bool isDividendExactlyOne(MachineFunction &MF, unsigned DividendReg) {
+  // If the dividend is exactly 1.0 we do not need to create an additional
+  // constant for the reciprocal refinement; the dividend itself can be reused.
+  // Look for the dividend's definition and check whether it is a constant
+  // with the value 1.0.
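+  // Note: when DividendReg is a virtual register (the common case here, before
+  // register allocation), MachineRegisterInfo::getVRegDef() could locate the
+  // definition directly instead of scanning the whole function.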
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      for (auto &MO : MI.operands()) {
+        if (!MO.isReg() || !MO.isDef())
+          continue;
+        if (MO.getReg() == DividendReg) {
+          const auto &Constants = MF.getConstantPool()->getConstants();
+          for (auto &DefOp : MI.operands()) {
+            if (DefOp.isCPI()) {
+              // This instruction has a constant-pool-index operand.
+              // FIXME: should we handle other operand kinds, e.g. immediates?
+              const auto &ConstantEntry = Constants[DefOp.getIndex()];
+              // FIXME: what should we do with a MachineConstantPoolEntry?
+              if (!ConstantEntry.isMachineConstantPoolEntry()) {
+                if (auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal)) {
+                  if (C->getType()->isVectorTy()) {
+                    if (!(C = C->getSplatValue()))
+                      return false;
+                  }
+                  if (auto *CFP = dyn_cast<ConstantFP>(C))
+                    return CFP->isExactlyValue(1.0);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  return false;
+}
+
+static EVT getFDivEVT(MachineInstr &Root) {
+  // FIXME: should we support other kinds of DIV?
+  switch (Root.getOpcode()) {
+  default:
+    break;
+  case X86::DIVSSrr:  // f32
+  case X86::VDIVSSrr: // f32
+    return MVT::f32;
+  case X86::DIVPSrr:  // v4f32
+  case X86::VDIVPSrr: // v4f32
+    return MVT::v4f32;
+  case X86::VDIVPSYrr: // v8f32
+    return MVT::v8f32;
+  }
+  return MVT::INVALID_SIMPLE_VALUE_TYPE;
+}
+
+/// genReciprocalDiv - Generates A = B * 1/C instead of A = B / C.
+///
+/// To increase precision, Newton-Raphson iterations are applied:
+///
+/// X[0] = reciprocal estimate of C;
+/// X[i+1] = X[i] + X[i] * (1 - C * X[i]); every iteration increases precision
+///
+/// The result of the division is then A = B * X[n].
+/// Example (-x86-asm-syntax=intel): instead of
+///
+///   vmovss  xmm1, dword ptr [rip + .LCPI0_0] # xmm1 = mem[0],zero,zero,zero
+///   vdivss  xmm0, xmm1, xmm0
+///
+/// we're generating
+///
+///   vmovss  xmm1, dword ptr [rip + .LCPI0_0] # xmm1 = mem[0],zero,zero,zero
+///   vrcpss  xmm2, xmm0, xmm0
+///   vmulss  xmm0, xmm0, xmm2
+///   vsubss  xmm0, xmm1, xmm0
+///   vmulss  xmm0, xmm0, xmm2
+///   vaddss  xmm0, xmm0, xmm2
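+///
+/// When the dividend B is exactly 1.0, the final multiply by B is skipped and
+/// the last refinement step writes the result register directly.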
+static void genReciprocalDiv(MachineInstr &Root,
+                             SmallVectorImpl<MachineInstr *> &InsInstrs,
+                             DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
+                             ArrayRef<int> Instrs, Type *Ty,
+                             X86Subtarget &Subtarget) {
+
+  MachineBasicBlock *MBB = Root.getParent();
+  MachineFunction &MF = *MBB->getParent();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
+  int Iterations = TLI->getDivRefinementSteps(getFDivEVT(Root), MF);
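+  // The refinement count comes from the per-function "reciprocal-estimates"
+  // attribute (or the -mrecip option), e.g. "divf:2,vec-divf:2".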
+
+  unsigned ResultReg = Root.getOperand(0).getReg();
+
+  unsigned DividendReg = Root.getOperand(1).getReg();
+  bool DividendIsExactlyOne = isDividendExactlyOne(MF, DividendReg);
+
+  unsigned DividerReg = Root.getOperand(2).getReg();
+  bool DividerIsKill = Root.getOperand(2).isKill();
+  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
+    MRI.constrainRegClass(ResultReg, RC);
+  if (TargetRegisterInfo::isVirtualRegister(DividendReg))
+    MRI.constrainRegClass(DividendReg, RC);
+  if (TargetRegisterInfo::isVirtualRegister(DividerReg))
+    MRI.constrainRegClass(DividerReg, RC);
+
+  if (Iterations < 0)
+    Iterations = 1; // An unspecified refinement count defaults to 1 iteration.
+
+  // 0: rcp
+  // The initial estimate X[0] is the hardware reciprocal of C.
+  MachineInstrBuilder RcpMI;
+  // If DivIsRcp is true, the division is approximated by the rcp alone,
+  // without any additional refinement.
+  bool DivIsRcp = DividendIsExactlyOne && !Iterations;
+
+  unsigned RcpVReg = 0;
+  if (!DivIsRcp) {
+    // The estimate needs refinement, so it is produced into its own vreg.
+    RcpVReg = MRI.createVirtualRegister(RC);
+    InstrIdxForVirtReg.insert(std::make_pair(RcpVReg, 0));
+  }
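+  // The AVX scalar form (VRCPSSr) has an extra source operand whose upper
+  // elements are merged into the result, so the divisor is passed twice;
+  // the packed and legacy SSE forms take a single source operand.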
+  if (Instrs[0] == X86::VRCPSSr)
+    RcpMI = BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[0]),
+                    DivIsRcp ? ResultReg : RcpVReg)
+                .addReg(DividerReg, getKillRegState(DividerIsKill))
+                .addReg(DividerReg, getKillRegState(DividerIsKill));
+  else
+    RcpMI = BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[0]),
+                    DivIsRcp ? ResultReg : RcpVReg)
+                .addReg(DividerReg, getKillRegState(DividerIsKill));
+  InsInstrs.push_back(RcpMI);
+
+  unsigned LoadVReg = 0;
+  if (!DividendIsExactlyOne && Iterations) {
+    // 2: load
+    // We need all ones value to be able to do (1 - C * X[i])
+    // x86-32 PIC requires a PIC base register for constant pools.
+    unsigned PICBase = 0;
+    if (MF.getTarget().isPositionIndependent()) {
+      if (Subtarget.is64Bit())
+        PICBase = X86::RIP;
+      else
+        // FIXME: PICBase = getGlobalBaseReg(&MF);
+        // This doesn't work for several reasons.
+        // 1. GlobalBaseReg may have been spilled.
+        // 2. It may not be live at MI.
+        return;
+    }
+    // Create a constant-pool entry holding 1.0 of the division's type.
+    MachineConstantPool &MCP = *MF.getConstantPool();
+    auto *CFP = ConstantFP::get(Ty, 1.0);
+    unsigned Alignment = MF.getDataLayout().getPrefTypeAlignment(Ty);
+    unsigned CPI = MCP.getConstantPoolIndex(CFP, Alignment);
+    LoadVReg = MRI.createVirtualRegister(RC);
+    InstrIdxForVirtReg.insert(std::make_pair(LoadVReg, 0));
+
+    MachineInstrBuilder LoadMI;
+    const MCInstrDesc &MIDesc = TII->get(Instrs[2]);
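+    // The memory-load forms (e.g. MOVSSrm, VMOVAPSrm) take a destination plus
+    // a full X86 memory reference (base, scale, index, displacement, segment),
+    // i.e. 6 operands; any other opcode gets just the constant-pool index.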
+    if (MIDesc.getNumOperands() == 6)
+      LoadMI = BuildMI(MF, Root.getDebugLoc(), MIDesc, LoadVReg)
+                   .addReg(PICBase)
+                   .addImm(1)
+                   .addReg(0)
+                   .addConstantPoolIndex(CPI)
+                   .addReg(0);
+    else
+      LoadMI = BuildMI(MF, Root.getDebugLoc(), MIDesc, LoadVReg)
+                   .addConstantPoolIndex(CPI);
+    InsInstrs.push_back(LoadMI);
+  }
+  unsigned EstVReg = RcpVReg; // X[0] = reciprocal (C);
+
+  for (int i = 0; i < Iterations; i++) {
+    // 1: mul
+    unsigned MulVReg = MRI.createVirtualRegister(RC);
+    InstrIdxForVirtReg.insert(std::make_pair(MulVReg, 0));
+    MachineInstrBuilder MulMI =
+        BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[1]), MulVReg)
+            .addReg(DividerReg, getKillRegState(DividerIsKill))
+            .addReg(EstVReg);
+    InsInstrs.push_back(MulMI); // C * X[i]
+
+    // 3: sub
+    unsigned SubVReg = MRI.createVirtualRegister(RC);
+    InstrIdxForVirtReg.insert(std::make_pair(SubVReg, 0));
+    MachineInstrBuilder SubMI =
+        BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[3]), SubVReg)
+            .addReg(DividendIsExactlyOne ? DividendReg : LoadVReg)
+            .addReg(MulVReg);
+    InsInstrs.push_back(SubMI); // 1 - C * X[i]
+
+    // 4: mul2
+    unsigned Mul2VReg = MRI.createVirtualRegister(RC);
+    InstrIdxForVirtReg.insert(std::make_pair(Mul2VReg, 0));
+    MachineInstrBuilder Mul2MI =
+        BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[4]), Mul2VReg)
+            .addReg(SubVReg)
+            .addReg(EstVReg);
+    InsInstrs.push_back(Mul2MI); // X[i] * (1 - C * X[i])
+
+    // 5: add
+    MachineInstrBuilder AddMI;
+    if (DividendIsExactlyOne && (i + 1 == Iterations))
+      AddMI = BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[5]), ResultReg)
+                  .addReg(Mul2VReg)
+                  .addReg(EstVReg);
+    else {
+      unsigned AddVReg = MRI.createVirtualRegister(RC);
+      InstrIdxForVirtReg.insert(std::make_pair(AddVReg, 0));
+      AddMI = BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[5]), AddVReg)
+                  .addReg(Mul2VReg)
+                  .addReg(EstVReg);
+      EstVReg = AddVReg;
+    }
+    InsInstrs.push_back(AddMI); // X[i] + X[i] * (1 - C * X[i])
+  }
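+  // After the loop, EstVReg holds the refined reciprocal X[n] (unless the
+  // dividend was exactly 1.0, in which case the last iteration already wrote
+  // the result register).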
+  if (!DividendIsExactlyOne) {
+    // 6: result mul
+    // The final multiplication B * 1/C
+    MachineInstrBuilder ResultMulMI =
+        BuildMI(MF, Root.getDebugLoc(), TII->get(Instrs[6]), ResultReg)
+            .addReg(DividendReg)
+            .addReg(EstVReg);
+    InsInstrs.push_back(ResultMulMI);
+  }
+}
+
+/// When getMachineCombinerPatterns() finds potential patterns,
+/// this function generates the instructions that could replace the
+/// original code sequence.
+void X86InstrInfo::genAlternativeCodeSequence(
+    MachineInstr &Root, MachineCombinerPattern Pattern,
+    SmallVectorImpl<MachineInstr *> &InsInstrs,
+    SmallVectorImpl<MachineInstr *> &DelInstrs,
+    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
+  MachineBasicBlock &MBB = *Root.getParent();
+  MachineFunction &MF = *MBB.getParent();
+
+  switch (Pattern) {
+  default:
+    // Reassociate instructions.
+    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
+                                                DelInstrs, InstrIdxForVirtReg);
+    return;
+  case MachineCombinerPattern::Div2RecipEst:
+    switch (Root.getOpcode()) {
+    default:
+      return;
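+    // The opcode array passed to genReciprocalDiv() is laid out as:
+    // {rcp, mul, load of 1.0, sub, mul, add, final mul by the dividend}.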
+    case X86::VDIVSSrr: // f32
+      genReciprocalDiv(
+          Root, InsInstrs, InstrIdxForVirtReg,
+          {X86::VRCPSSr, X86::VMULSSrr, X86::VMOVSSrm, X86::VSUBSSrr,
+           X86::VMULSSrr, X86::VADDSSrr, X86::VMULSSrr},
+          Type::getFloatTy(MF.getFunction()->getContext()), Subtarget);
+      break;
+    case X86::VDIVPSrr: // v4f32
+      genReciprocalDiv(
+          Root, InsInstrs, InstrIdxForVirtReg,
+          {X86::VRCPPSr, X86::VMULPSrr, X86::VMOVAPSrm, X86::VSUBPSrr,
+           X86::VMULPSrr, X86::VADDPSrr, X86::VMULPSrr},
+          VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 4),
+          Subtarget);
+      break;
+    case X86::VDIVPSYrr: // v8f32
+      genReciprocalDiv(
+          Root, InsInstrs, InstrIdxForVirtReg,
+          {X86::VRCPPSYr, X86::VMULPSYrr, X86::VMOVAPSYrm, X86::VSUBPSYrr,
+           X86::VMULPSYrr, X86::VADDPSYrr, X86::VMULPSYrr},
+          VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8),
+          Subtarget);
+      break;
+    case X86::DIVSSrr: // f32
+      genReciprocalDiv(Root, InsInstrs, InstrIdxForVirtReg,
+                       {X86::RCPSSr, X86::MULSSrr, X86::MOVSSrm, X86::SUBSSrr,
+                        X86::MULSSrr, X86::ADDSSrr, X86::MULSSrr},
+                       Type::getFloatTy(MF.getFunction()->getContext()),
+                       Subtarget);
+      break;
+    case X86::DIVPSrr: // v4f32
+      genReciprocalDiv(
+          Root, InsInstrs, InstrIdxForVirtReg,
+          {X86::RCPPSr, X86::MULPSrr, X86::MOVAPSrr, X86::SUBPSrr, X86::MULPSrr,
+           X86::ADDPSrr, X86::MULPSrr},
+          VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 4),
+          Subtarget);
+      break;
+    }
+    break;
+  }
+  DEBUG(dbgs() << "\nAlternate sequence for " << MF.getName() << "\n";
+        for (unsigned i = 0; i < InsInstrs.size(); i++) {
+          dbgs() << i << ": ";
+          InsInstrs[i]->print(dbgs(), false, Root.getParent());
+        });
+  // genReciprocalDiv() may bail out without emitting a replacement (e.g.
+  // x86-32 PIC); only schedule the FDiv for deletion when one exists.
+  if (!InsInstrs.empty())
+    DelInstrs.push_back(&Root);
+}
+
+/// Find FDIV instructions that can be replaced with a reciprocal estimate
+/// plus Newton-Raphson refinement.
+static bool getFDIVPatterns(MachineInstr &Root,
+                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
+  switch (Root.getOpcode()) {
+  default:
+    return false;
+  // TODO: should we support other kinds of instructions?
+  case X86::VDIVSSrr:  // f32
+  case X86::VDIVPSrr:  // v4f32
+  case X86::VDIVPSYrr: // v8f32
+  case X86::DIVSSrr:   // f32
+  case X86::DIVPSrr:   // v4f32
+    break;
+  }
+  auto *MF = Root.getParent()->getParent();
+  auto TLI = MF->getSubtarget().getTargetLowering();
+  EVT VT = getFDivEVT(Root);
+  if (VT == MVT::INVALID_SIMPLE_VALUE_TYPE)
+    return false;
+  switch (TLI->getRecipEstimateDivEnabled(VT, *MF)) {
+  case TargetLowering::ReciprocalEstimate::Disabled:
+    return false;
+  case TargetLowering::ReciprocalEstimate::Unspecified:
+    if (MF->getTarget().Options.UnsafeFPMath)
+      break;
+    return false;
+  case TargetLowering::ReciprocalEstimate::Enabled:
+    break;
+  }
+
+  Patterns.push_back(MachineCombinerPattern::Div2RecipEst);
+  return true;
+}
+
+/// Return true when there is potentially a faster code sequence for an
+/// instruction chain ending in \p Root. All potential patterns are listed in
+/// the \p Patterns vector. Patterns should be sorted in priority order, since
+/// the pattern evaluator stops checking as soon as it finds a faster sequence.
+bool X86InstrInfo::getMachineCombinerPatterns(
+    MachineInstr &Root,
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
+  // FDIV patterns
+  if (getFDIVPatterns(Root, Patterns))
+    return true;
+  // TODO: FSQRT patterns will be added once the reciprocal implementation is
+  // complete.
+  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
+}
+
 /// This is an architecture-specific helper function of reassociateOps.
 /// Set special operand attributes for new instructions after reassociation.
 void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
Index: test/CodeGen/X86/recip-fastmath.ll
===================================================================
--- test/CodeGen/X86/recip-fastmath.ll
+++ test/CodeGen/X86/recip-fastmath.ll
@@ -30,9 +30,9 @@
 define float @f32_one_step(float %x) #1 {
 ; SSE-LABEL: f32_one_step:
 ; SSE:       # BB#0:
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE-NEXT:    rcpss %xmm0, %xmm2
 ; SSE-NEXT:    mulss %xmm2, %xmm0
-; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE-NEXT:    subss %xmm0, %xmm1
 ; SSE-NEXT:    mulss %xmm2, %xmm1
 ; SSE-NEXT:    addss %xmm2, %xmm1
@@ -41,12 +41,12 @@
 ;
 ; AVX-LABEL: f32_one_step:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT:    vsubss %xmm0, %xmm2, %xmm0
-; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vaddss %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %div = fdiv fast float 1.0, %x
   ret float %div
@@ -55,10 +55,10 @@
 define float @f32_two_step(float %x) #2 {
 ; SSE-LABEL: f32_two_step:
 ; SSE:       # BB#0:
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE-NEXT:    rcpss %xmm0, %xmm2
 ; SSE-NEXT:    movaps %xmm0, %xmm3
 ; SSE-NEXT:    mulss %xmm2, %xmm3
-; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE-NEXT:    movaps %xmm1, %xmm4
 ; SSE-NEXT:    subss %xmm3, %xmm4
 ; SSE-NEXT:    mulss %xmm2, %xmm4
@@ -72,16 +72,16 @@
 ;
 ; AVX-LABEL: f32_two_step:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm2
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vsubss %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm2
-; AVX-NEXT:    vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vsubss %xmm0, %xmm3, %xmm0
-; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm3
+; AVX-NEXT:    vsubss %xmm3, %xmm1, %xmm3
+; AVX-NEXT:    vmulss %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vaddss %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vaddss %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %div = fdiv fast float 1.0, %x
   ret float %div
@@ -107,9 +107,9 @@
 define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
 ; SSE-LABEL: v4f32_one_step:
 ; SSE:       # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    rcpps %xmm0, %xmm2
 ; SSE-NEXT:    mulps %xmm2, %xmm0
-; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    subps %xmm0, %xmm1
 ; SSE-NEXT:    mulps %xmm2, %xmm1
 ; SSE-NEXT:    addps %xmm2, %xmm1
@@ -118,12 +118,12 @@
 ;
 ; AVX-LABEL: v4f32_one_step:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vrcpps %xmm0, %xmm1
-; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT:    vsubps %xmm0, %xmm2, %xmm0
-; AVX-NEXT:    vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vrcpps %xmm0, %xmm2
+; AVX-NEXT:    vmulps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vsubps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmulps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vaddps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <4 x float> %div
@@ -132,10 +132,10 @@
 define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
 ; SSE-LABEL: v4f32_two_step:
 ; SSE:       # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    rcpps %xmm0, %xmm2
 ; SSE-NEXT:    movaps %xmm0, %xmm3
 ; SSE-NEXT:    mulps %xmm2, %xmm3
-; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    movaps %xmm1, %xmm4
 ; SSE-NEXT:    subps %xmm3, %xmm4
 ; SSE-NEXT:    mulps %xmm2, %xmm4
@@ -149,16 +149,16 @@
 ;
 ; AVX-LABEL: v4f32_two_step:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vrcpps %xmm0, %xmm1
-; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; AVX-NEXT:    vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT:    vsubps %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vmulps %xmm2, %xmm1, %xmm2
-; AVX-NEXT:    vaddps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vsubps %xmm0, %xmm3, %xmm0
-; AVX-NEXT:    vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vrcpps %xmm0, %xmm2
+; AVX-NEXT:    vmulps %xmm2, %xmm0, %xmm3
+; AVX-NEXT:    vsubps %xmm3, %xmm1, %xmm3
+; AVX-NEXT:    vmulps %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vaddps %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vmulps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vsubps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmulps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vaddps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <4 x float> %div
@@ -187,9 +187,9 @@
 define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
 ; SSE-LABEL: v8f32_one_step:
 ; SSE:       # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    rcpps %xmm0, %xmm4
 ; SSE-NEXT:    mulps %xmm4, %xmm0
-; SSE-NEXT:    movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    movaps %xmm2, %xmm3
 ; SSE-NEXT:    subps %xmm0, %xmm3
 ; SSE-NEXT:    mulps %xmm4, %xmm3
@@ -205,12 +205,12 @@
 ;
 ; AVX-LABEL: v8f32_one_step:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vrcpps %ymm0, %ymm1
-; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT:    vsubps %ymm0, %ymm2, %ymm0
-; AVX-NEXT:    vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vrcpps %ymm0, %ymm2
+; AVX-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    vsubps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    vaddps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <8 x float> %div
@@ -220,10 +220,10 @@
 ; SSE-LABEL: v8f32_two_step:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    rcpps %xmm0, %xmm3
 ; SSE-NEXT:    movaps %xmm0, %xmm4
 ; SSE-NEXT:    mulps %xmm3, %xmm4
-; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; SSE-NEXT:    movaps %xmm1, %xmm5
 ; SSE-NEXT:    subps %xmm4, %xmm5
 ; SSE-NEXT:    mulps %xmm3, %xmm5
@@ -249,22 +249,286 @@
 ;
 ; AVX-LABEL: v8f32_two_step:
 ; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vrcpps %ymm0, %ymm2
+; AVX-NEXT:    vmulps %ymm2, %ymm0, %ymm3
+; AVX-NEXT:    vsubps %ymm3, %ymm1, %ymm3
+; AVX-NEXT:    vmulps %ymm2, %ymm3, %ymm3
+; AVX-NEXT:    vaddps %ymm2, %ymm3, %ymm2
+; AVX-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    vsubps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    vaddps %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
+  ret <8 x float> %div
+}
+
+define float @f32_no_step_2(float %x) #3 {
+; SSE-LABEL: f32_no_step_2:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpss %xmm0, %xmm0
+; SSE-NEXT:    mulss {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: f32_no_step_2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %div = fdiv fast float 1234.0, %x
+  ret float %div
+}
+
+define float @f32_one_step_2(float %x) #1 {
+; SSE-LABEL: f32_one_step_2:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpss %xmm0, %xmm2
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    mulss %xmm2, %xmm0
+; SSE-NEXT:    subss %xmm0, %xmm1
+; SSE-NEXT:    mulss %xmm2, %xmm1
+; SSE-NEXT:    addss %xmm2, %xmm1
+; SSE-NEXT:    mulss {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: f32_one_step_2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
+; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vsubss %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %div = fdiv fast float 3456.0, %x
+  ret float %div
+}
+
+define float @f32_two_step_2(float %x) #2 {
+; SSE-LABEL: f32_two_step_2:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpss %xmm0, %xmm2
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    mulss %xmm2, %xmm3
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    subss %xmm3, %xmm4
+; SSE-NEXT:    mulss %xmm2, %xmm4
+; SSE-NEXT:    addss %xmm2, %xmm4
+; SSE-NEXT:    mulss %xmm4, %xmm0
+; SSE-NEXT:    subss %xmm0, %xmm1
+; SSE-NEXT:    mulss %xmm4, %xmm1
+; SSE-NEXT:    addss %xmm4, %xmm1
+; SSE-NEXT:    mulss {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: f32_two_step_2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
+; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vsubss %xmm3, %xmm2, %xmm3
+; AVX-NEXT:    vmulss %xmm1, %xmm3, %xmm3
+; AVX-NEXT:    vaddss %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vsubss %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %div = fdiv fast float 6789.0, %x
+  ret float %div
+}
+
+define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
+; SSE-LABEL: v4f32_one_step2:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm2
+; SSE-NEXT:    movaps ${{\.LCPI.*}}, %xmm1
+; SSE-NEXT:    mulps %xmm2, %xmm0
+; SSE-NEXT:    subps %xmm0, %xmm1
+; SSE-NEXT:    mulps %xmm2, %xmm1
+; SSE-NEXT:    addps %xmm2, %xmm1
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v4f32_one_step2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpps %xmm0, %xmm1
+; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vsubps %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
+  ret <4 x float> %div
+}
+
+define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
+; SSE-LABEL: v4f32_two_step2:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm2
+; SSE-NEXT:    movaps ${{\.LCPI.*}}, %xmm1
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    mulps %xmm2, %xmm3
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    subps %xmm3, %xmm4
+; SSE-NEXT:    mulps %xmm2, %xmm4
+; SSE-NEXT:    addps %xmm2, %xmm4
+; SSE-NEXT:    mulps %xmm4, %xmm0
+; SSE-NEXT:    subps %xmm0, %xmm1
+; SSE-NEXT:    mulps %xmm4, %xmm1
+; SSE-NEXT:    addps %xmm4, %xmm1
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v4f32_two_step2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpps %xmm0, %xmm1
+; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vsubps %xmm3, %xmm2, %xmm3
+; AVX-NEXT:    vmulps %xmm1, %xmm3, %xmm3
+; AVX-NEXT:    vaddps %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vsubps %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
+  ret <4 x float> %div
+}
+
+define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
+; SSE-LABEL: v8f32_one_step2:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm4
+; SSE-NEXT:    movaps ${{\.LCPI.*}}, %xmm2
+; SSE-NEXT:    mulps %xmm4, %xmm0
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    subps %xmm0, %xmm3
+; SSE-NEXT:    mulps %xmm4, %xmm3
+; SSE-NEXT:    addps %xmm4, %xmm3
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm3
+; SSE-NEXT:    rcpps %xmm1, %xmm0
+; SSE-NEXT:    mulps %xmm0, %xmm1
+; SSE-NEXT:    subps %xmm1, %xmm2
+; SSE-NEXT:    mulps %xmm0, %xmm2
+; SSE-NEXT:    addps %xmm0, %xmm2
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm2
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    movaps %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_one_step2:
+; AVX:       # BB#0:
 ; AVX-NEXT:    vrcpps %ymm0, %ymm1
-; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm2
-; AVX-NEXT:    vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT:    vsubps %ymm2, %ymm3, %ymm2
-; AVX-NEXT:    vmulps %ymm2, %ymm1, %ymm2
-; AVX-NEXT:    vaddps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vsubps %ymm0, %ymm2, %ymm0
 ; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT:    vsubps %ymm0, %ymm3, %ymm0
-; AVX-NEXT:    vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
+  ret <8 x float> %div
+}
+
+define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
+; SSE-LABEL: v8f32_two_step2:
+; SSE:       # BB#0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    rcpps %xmm0, %xmm3
+; SSE-NEXT:    movaps ${{\.LCPI.*}}, %xmm1
+; SSE-NEXT:    movaps %xmm0, %xmm4
+; SSE-NEXT:    mulps %xmm3, %xmm4
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    subps %xmm4, %xmm5
+; SSE-NEXT:    mulps %xmm3, %xmm5
+; SSE-NEXT:    addps %xmm3, %xmm5
+; SSE-NEXT:    mulps %xmm5, %xmm0
+; SSE-NEXT:    movaps %xmm1, %xmm3
+; SSE-NEXT:    subps %xmm0, %xmm3
+; SSE-NEXT:    mulps %xmm5, %xmm3
+; SSE-NEXT:    addps %xmm5, %xmm3
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm3
+; SSE-NEXT:    rcpps %xmm2, %xmm0
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    mulps %xmm0, %xmm4
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    subps %xmm4, %xmm5
+; SSE-NEXT:    mulps %xmm0, %xmm5
+; SSE-NEXT:    addps %xmm0, %xmm5
+; SSE-NEXT:    mulps %xmm5, %xmm2
+; SSE-NEXT:    subps %xmm2, %xmm1
+; SSE-NEXT:    mulps %xmm5, %xmm1
+; SSE-NEXT:    addps %xmm5, %xmm1
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_two_step2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpps %ymm0, %ymm1
+; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm3
+; AVX-NEXT:    vsubps %ymm3, %ymm2, %ymm3
+; AVX-NEXT:    vmulps %ymm1, %ymm3, %ymm3
+; AVX-NEXT:    vaddps %ymm1, %ymm3, %ymm1
+; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vsubps %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
+  ret <8 x float> %div
+}
+
+define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
+; SSE-LABEL: v8f32_no_step:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm0
+; SSE-NEXT:    rcpps %xmm1, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_no_step:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpps %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <8 x float> %div
 }
 
+define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
+; SSE-LABEL: v8f32_no_step2:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm0
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    rcpps %xmm1, %xmm1
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v8f32_no_step2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vrcpps %ymm0, %ymm0
+; AVX-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
+  ret <8 x float> %div
+}
+
 attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!divf,!vec-divf" }
 attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf,vec-divf" }
 attributes #2 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:2,vec-divf:2" }
+attributes #3 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:0,vec-divf:0" }