Index: include/llvm/Target/TargetInstrInfo.h
===================================================================
--- include/llvm/Target/TargetInstrInfo.h
+++ include/llvm/Target/TargetInstrInfo.h
@@ -679,16 +679,16 @@
   /// order since the pattern evaluator stops checking as soon as it finds a
   /// faster sequence.
   /// \param Root - Instruction that could be combined with one of its operands
-  /// \param Pattern - Vector of possible combination pattern
-  virtual bool hasPattern(
+  /// \param Patterns - Vector of possible combination patterns
+  virtual bool getMachineCombinerPatterns(
       MachineInstr &Root,
-      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
+      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
     return false;
   }
 
-  /// When hasPattern() finds a pattern this function generates the instructions
-  /// that could replace the original code sequence. The client has to decide
-  /// whether the actual replacement is beneficial or not.
+  /// When getMachineCombinerPatterns() finds patterns, this function generates
+  /// the instructions that could replace the original code sequence. The client
+  /// has to decide whether the actual replacement is beneficial or not.
   /// \param Root - Instruction that could be combined with one of its operands
   /// \param P - Combination pattern for Root
   /// \param InsInstrs - Vector of new instructions that implement P
@@ -697,7 +697,7 @@
   /// \param InstrIdxForVirtReg - map of virtual register to instruction in
   /// InsInstr that defines it
   virtual void genAlternativeCodeSequence(
-      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
+      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
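For orientation, here is a minimal sketch (not part of the patch) of how a client drives the two hooks together; it mirrors the loop in MachineCombiner.cpp below, and assumes a TargetInstrInfo *TII and a candidate MachineInstr &Root are in scope:

    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
    if (TII->getMachineCombinerPatterns(Root, Patterns)) {
      for (auto P : Patterns) {
        SmallVector<MachineInstr *, 16> InsInstrs;
        SmallVector<MachineInstr *, 16> DelInstrs;
        DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
        // Ask the target to materialize the pattern; the client then decides
        // whether the replacement is profitable before committing to it.
        TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                        InstrIdxForVirtReg);
      }
    }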
Index: lib/CodeGen/MachineCombiner.cpp
===================================================================
--- lib/CodeGen/MachineCombiner.cpp
+++ lib/CodeGen/MachineCombiner.cpp
@@ -67,10 +67,11 @@
   unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                       MachineTraceMetrics::Trace BlockTrace);
   bool
-  preservesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
+  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
-                           DenseMap<unsigned, unsigned> &InstrIdxForVirtReg);
+                           DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
+                           bool NewCodeHasLessInsts);
   bool preservesResourceLen(MachineBasicBlock *MBB,
                             MachineTraceMetrics::Trace BlockTrace,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
@@ -208,19 +209,24 @@
   return NewRootLatency;
 }
 
-/// True when the new instruction sequence does not
-/// lengthen the critical path. The DAGCombine code sequence ends in MI
-/// (Machine Instruction) Root. The new code sequence ends in MI NewRoot. A
-/// necessary condition for the new sequence to replace the old sequence is that
-/// it cannot lengthen the critical path. This is decided by the formula
+/// True when the new instruction sequence does not lengthen the critical path
+/// and the new sequence has fewer instructions or the new sequence improves the
+/// critical path.
+/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
+/// The new code sequence ends in MI NewRoot. A necessary condition for the new
+/// sequence to replace the old sequence is that it cannot lengthen the critical
+/// path. This is decided by the formula:
 /// (NewRootDepth + NewRootLatency) <= (RootDepth + RootLatency + RootSlack)).
-/// The slack is the number of cycles Root can be delayed before the critical
-/// patch becomes longer.
-bool MachineCombiner::preservesCriticalPathLen(
+/// If the new sequence has an equal-length critical path but does not reduce
+/// the number of instructions (NewCodeHasLessInsts is false), then it is not
+/// considered an improvement. The slack is the number of cycles Root can be
+/// delayed before the critical path becomes longer.
+bool MachineCombiner::improvesCriticalPathLen(
     MachineBasicBlock *MBB, MachineInstr *Root,
     MachineTraceMetrics::Trace BlockTrace,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
-    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
+    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
+    bool NewCodeHasLessInsts) {
   assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
 
   // NewRoot is the last instruction in the \p InsInstrs vector.
@@ -245,9 +251,13 @@
           dbgs() << " RootDepth + RootLatency + RootSlack "
                  << RootDepth + RootLatency + RootSlack << "\n";);
 
-  /// True when the new sequence does not lengthen the critical path.
-  return ((NewRootDepth + NewRootLatency) <=
-          (RootDepth + RootLatency + RootSlack));
+  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
+  unsigned OldCycleCount = RootDepth + RootLatency + RootSlack;
+
+  if (NewCodeHasLessInsts)
+    return NewCycleCount <= OldCycleCount;
+  else
+    return NewCycleCount < OldCycleCount;
 }
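To make the acceptance rule concrete, here is a worked example with hypothetical cycle counts (illustrative numbers, not taken from any real schedule model):

    unsigned NewRootDepth = 3, NewRootLatency = 4;          // new sequence
    unsigned RootDepth = 2, RootLatency = 4, RootSlack = 1; // old sequence
    unsigned NewCycleCount = NewRootDepth + NewRootLatency;       // 7
    unsigned OldCycleCount = RootDepth + RootLatency + RootSlack; // 7
    bool NewCodeHasLessInsts = true; // e.g. two instructions become one
    // The critical path ties (7 vs. 7), so the rewrite is accepted only
    // because it saves an instruction; with NewCodeHasLessInsts == false the
    // strict comparison (7 < 7) fails and the old sequence is kept.
    bool Profitable = NewCodeHasLessInsts ? NewCycleCount <= OldCycleCount
                                          : NewCycleCount < OldCycleCount;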
 
 /// helper routine to convert instructions into SC
@@ -322,7 +332,7 @@
     auto &MI = *BlockIter++;
 
     DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
 
-    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Pattern;
+    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
     // The motivating example is:
     //
     //     MUL  Other        MUL_op1 MUL_op2  Other
@@ -345,11 +355,11 @@
     //
     // The algorithm does not try to evaluate all patterns and pick the best.
     // This is only an artificial restriction though. In practice there is
-    // mostly one pattern and hasPattern() can order patterns based on an
-    // internal cost heuristic.
+    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
+    // based on an internal cost heuristic.
 
-    if (TII->hasPattern(MI, Pattern)) {
-      for (auto P : Pattern) {
+    if (TII->getMachineCombinerPatterns(MI, Patterns)) {
+      for (auto P : Patterns) {
         SmallVector<MachineInstr *, 16> InsInstrs;
         SmallVector<MachineInstr *, 16> DelInstrs;
         DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
@@ -359,18 +369,21 @@
         Traces->verifyAnalysis();
         TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                         InstrIdxForVirtReg);
+        unsigned NewInstCount = InsInstrs.size();
+        unsigned OldInstCount = DelInstrs.size();
         // Found pattern, but did not generate alternative sequence.
         // This can happen e.g. when an immediate could not be materialized
         // in a single instruction.
-        if (!InsInstrs.size())
+        if (!NewInstCount)
           continue;
         // Substitute when we optimize for codesize and the new sequence has
         // fewer instructions OR
         // the new sequence neither lengthens the critical path nor increases
         // resource pressure.
-        if (doSubstitute(InsInstrs.size(), DelInstrs.size()) ||
-            (preservesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs,
-                                      InstrIdxForVirtReg) &&
+        if (doSubstitute(NewInstCount, OldInstCount) ||
+            (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs,
+                                     InstrIdxForVirtReg,
+                                     NewInstCount < OldInstCount) &&
             preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
           for (auto *InstrPtr : InsInstrs)
             MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);
Index: lib/Target/AArch64/AArch64InstrInfo.h
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.h
+++ lib/Target/AArch64/AArch64InstrInfo.h
@@ -163,19 +163,17 @@
                         unsigned SrcReg2, int CmpMask, int CmpValue,
                         const MachineRegisterInfo *MRI) const override;
   bool optimizeCondBranch(MachineInstr *MI) const override;
-  /// hasPattern - return true when there is potentially a faster code sequence
+  /// Return true when there is potentially a faster code sequence
   /// for an instruction chain ending in <Root>. All potential patterns are
-  /// listed
-  /// in the <Pattern> array.
-  bool hasPattern(MachineInstr &Root,
-                  SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern)
+  /// listed in the <Patterns> array.
+  bool getMachineCombinerPatterns(MachineInstr &Root,
+          SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns)
       const override;
-  /// genAlternativeCodeSequence - when hasPattern() finds a pattern
-  /// this function generates the instructions that could replace the
-  /// original code sequence
+  /// When getMachineCombinerPatterns() finds patterns, this function generates
+  /// the instructions that could replace the original code sequence.
   void genAlternativeCodeSequence(
-      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
+      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
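As context for the AArch64 implementation that follows (MC_MULADDW_OP1 and friends), here is a scalar C++ analogue (a hypothetical function, for illustration only) of the MUL + ADD chain that collapses into a single multiply-add:

    int madd_analogue(int A, int B, int C) {
      int Mul = A * B; // the MUL feeding one operand of Root
      return Mul + C;  // the ADDWrr Root; fused form: MADD A, B, C
    }

Whether the fused form actually wins is exactly what improvesCriticalPathLen() now decides: the MADD saves an instruction, but it can lengthen the path through the multiply's operands when only the ADD was on the critical path.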
Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2459,15 +2459,14 @@
   return true;
 }
 
-/// hasPattern - return true when there is potentially a faster code sequence
+/// Return true when there is potentially a faster code sequence
 /// for an instruction chain ending in \p Root. All potential patterns are
 /// listed
 /// in the \p Pattern vector. Pattern should be sorted in priority order since
 /// the pattern evaluator stops checking as soon as it finds a faster sequence.
-
-bool AArch64InstrInfo::hasPattern(
+bool AArch64InstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
-    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
+    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
   unsigned Opc = Root.getOpcode();
   MachineBasicBlock &MBB = *Root.getParent();
   bool Found = false;
@@ -2495,76 +2494,76 @@
            "ADDWrr does not have register operands");
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
+      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
       Found = true;
     }
     break;
   case AArch64::ADDXrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
+      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
       Found = true;
     }
     break;
   case AArch64::SUBWrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
+      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
       Found = true;
     }
     break;
   case AArch64::SUBXrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
+      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
       Found = true;
     }
     break;
   case AArch64::ADDWri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
       Found = true;
     }
     break;
   case AArch64::ADDXri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
       Found = true;
     }
     break;
   case AArch64::SUBWri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
       Found = true;
     }
     break;
   case AArch64::SUBXri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Pattern.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
       Found = true;
     }
     break;
@@ -2667,9 +2666,9 @@
   return MUL;
 }
 
-/// genAlternativeCodeSequence - when hasPattern() finds a pattern
+/// When getMachineCombinerPatterns() finds potential patterns,
 /// this function generates the instructions that could replace the
-/// original code sequence
+/// original code sequence.
 void AArch64InstrInfo::genAlternativeCodeSequence(
     MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
Index: lib/Target/X86/X86InstrInfo.h
===================================================================
--- lib/Target/X86/X86InstrInfo.h
+++ lib/Target/X86/X86InstrInfo.h
@@ -447,12 +447,12 @@
   /// Return true when there is potentially a faster code sequence
   /// for an instruction chain ending in <Root>. All potential patterns are
   /// output in the <P> array.
-  bool hasPattern(
+  bool getMachineCombinerPatterns(
       MachineInstr &Root,
       SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &P) const override;
 
-  /// When hasPattern() finds a pattern, this function generates the
-  /// instructions that could replace the original code sequence.
+  /// When getMachineCombinerPatterns() finds patterns, this function generates
+  /// the instructions that could replace the original code sequence.
   void genAlternativeCodeSequence(
       MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
Index: lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- lib/Target/X86/X86InstrInfo.cpp
+++ lib/Target/X86/X86InstrInfo.cpp
@@ -6224,24 +6224,10 @@
   return isHighLatencyDef(DefMI->getOpcode());
 }
 
-/// If the input instruction is part of a chain of dependent ops that are
-/// suitable for reassociation, return the earlier instruction in the sequence
-/// that defines its first operand, otherwise return a nullptr.
-/// If the instruction's operands must be commuted to be considered a
-/// reassociation candidate, Commuted will be set to true.
-static MachineInstr *isReassocCandidate(const MachineInstr &Inst,
-                                        unsigned AssocOpcode,
-                                        bool checkPrevOneUse,
-                                        bool &Commuted) {
-  if (Inst.getOpcode() != AssocOpcode)
-    return nullptr;
-
-  MachineOperand Op1 = Inst.getOperand(1);
-  MachineOperand Op2 = Inst.getOperand(2);
-
-  const MachineBasicBlock *MBB = Inst.getParent();
+static bool hasVirtualRegDefsInBasicBlock(MachineOperand Op1,
+                                          MachineOperand Op2,
+                                          const MachineBasicBlock *MBB) {
   const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
-  // We need virtual register definitions.
   MachineInstr *MI1 = nullptr;
   MachineInstr *MI2 = nullptr;
@@ -6252,7 +6238,33 @@
 
   // And they need to be in the trace (otherwise, they won't have a depth).
   if (!MI1 || !MI2 || MI1->getParent() != MBB || MI2->getParent() != MBB)
-    return nullptr;
+    return false;
+
+  return true;
+}
+
+/// Return true if the input instruction is part of a chain of dependent ops
+/// that are suitable for reassociation, otherwise return false.
+/// If the instruction's operands must be commuted to have a previous
+/// instruction of the same type define the first source operand, Commuted will
+/// be set to true.
+static bool isReassocCandidate(const MachineInstr &Inst, unsigned AssocOpcode,
+                               bool &Commuted) {
+  if (Inst.getOpcode() != AssocOpcode)
+    return false;
+
+  assert(Inst.getNumOperands() == 3 &&
+         "Must be a binary operator for reassociation");
+
+  const MachineBasicBlock *MBB = Inst.getParent();
+  MachineOperand Op1 = Inst.getOperand(1);
+  MachineOperand Op2 = Inst.getOperand(2);
+  if (!hasVirtualRegDefsInBasicBlock(Op1, Op2, MBB))
+    return false;
+
+  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+  MachineInstr *MI1 = MRI.getUniqueVRegDef(Op1.getReg());
+  MachineInstr *MI2 = MRI.getUniqueVRegDef(Op2.getReg());
 
   Commuted = false;
   if (MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode) {
@@ -6260,67 +6272,55 @@
     std::swap(MI1, MI2);
     Commuted = true;
   }
 
-  // Avoid reassociating operands when it won't provide any benefit. If both
-  // operands are produced by instructions of this type, we may already
-  // have the optimal sequence.
-  if (MI2->getOpcode() == AssocOpcode)
-    return nullptr;
-
-  // The instruction must only be used by the other instruction that we
-  // reassociate with.
-  if (checkPrevOneUse && !MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg()))
-    return nullptr;
-
-  // We must match a simple chain of dependent ops.
-  // TODO: This check is not necessary for the earliest instruction in the
-  // sequence. Instead of a sequence of 3 dependent instructions with the same
-  // opcode, we only need to find a sequence of 2 dependent instructions with
-  // the same opcode plus 1 other instruction that adds to the height of the
-  // trace.
+  // We need a previous instruction of the same type to reassociate.
   if (MI1->getOpcode() != AssocOpcode)
-    return nullptr;
-
-  return MI1;
-}
+    return false;
 
-/// Select a pattern based on how the operands of each associative operation
-/// need to be commuted.
-static MachineCombinerPattern::MC_PATTERN getPattern(bool CommutePrev,
-                                                     bool CommuteRoot) {
-  if (CommutePrev) {
-    if (CommuteRoot)
-      return MachineCombinerPattern::MC_REASSOC_XA_YB;
-    return MachineCombinerPattern::MC_REASSOC_XA_BY;
-  } else {
-    if (CommuteRoot)
-      return MachineCombinerPattern::MC_REASSOC_AX_YB;
-    return MachineCombinerPattern::MC_REASSOC_AX_BY;
-  }
+  // The previous instruction must only be used by the instruction that we
+  // reassociate with.
+  if (!MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg()))
+    return false;
+
+  MachineOperand Op11 = MI1->getOperand(1);
+  MachineOperand Op12 = MI1->getOperand(2);
+  if (!hasVirtualRegDefsInBasicBlock(Op11, Op12, MBB))
+    return false;
+
+  return true;
 }
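The hasOneNonDBGUse() check above is what keeps the rewrite size-neutral. A scalar analogue (hypothetical functions, for illustration only) of what passes and what is rejected:

    float reassoc_candidate(float A, float X, float Y) {
      float B = A + X; // Prev: B has exactly one use below
      return B + Y;    // Root: B can be rewritten away
    }

    float reassoc_blocked(float A, float X, float Y, float *Out) {
      float B = A + X; // Prev: B also escapes through *Out, so the
      *Out = B;        // one-use check fails and no pattern is
      return B + Y;    // reported for this Root.
    }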
 
-bool X86InstrInfo::hasPattern(MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
+bool X86InstrInfo::getMachineCombinerPatterns(MachineInstr &Root,
+        SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
   if (!Root.getParent()->getParent()->getTarget().Options.UnsafeFPMath)
     return false;
 
+  // TODO: There is nothing x86-specific here except the instruction type.
+  // This logic could be hoisted into the machine combiner pass itself.
+
+  // Look for this reassociation pattern:
+  //   B = A op X (Prev)
+  //   C = B op Y (Root)
+
   // TODO: There are many more associative instruction types to match:
   //   1. Other forms of scalar FP add (non-AVX)
   //   2. Other data types (double, integer, vectors)
   //   3. Other math / logic operations (mul, and, or)
   unsigned AssocOpcode = X86::VADDSSrr;
 
-  // TODO: There is nothing x86-specific here except the instruction type.
-  // This logic could be hoisted into the machine combiner pass itself.
-  bool CommuteRoot;
-  if (MachineInstr *Prev = isReassocCandidate(Root, AssocOpcode, true,
-                                              CommuteRoot)) {
-    bool CommutePrev;
-    if (isReassocCandidate(*Prev, AssocOpcode, false, CommutePrev)) {
-      // We found a sequence of instructions that may be suitable for a
-      // reassociation of operands to increase ILP.
-      Pattern.push_back(getPattern(CommutePrev, CommuteRoot));
-      return true;
+  bool Commute = false;
+  if (isReassocCandidate(Root, AssocOpcode, Commute)) {
+    // We found a sequence of instructions that may be suitable for a
+    // reassociation of operands to increase ILP. Specify each commutation
+    // possibility for the Prev instruction in the sequence and let the
+    // machine combiner decide if changing the operands is worthwhile.
+    if (Commute) {
+      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_YB);
+      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_YB);
+    } else {
+      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_BY);
+      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_BY);
     }
+    return true;
   }
 
   return false;
@@ -6415,14 +6415,16 @@
 
   // Select the previous instruction in the sequence based on the input pattern.
   MachineInstr *Prev = nullptr;
-  if (Pattern == MachineCombinerPattern::MC_REASSOC_AX_BY ||
-      Pattern == MachineCombinerPattern::MC_REASSOC_XA_BY)
-    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
-  else if (Pattern == MachineCombinerPattern::MC_REASSOC_AX_YB ||
-           Pattern == MachineCombinerPattern::MC_REASSOC_XA_YB)
-    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
-  else
-    llvm_unreachable("Unknown pattern for machine combiner");
+  switch (Pattern) {
+  case MachineCombinerPattern::MC_REASSOC_AX_BY:
+  case MachineCombinerPattern::MC_REASSOC_XA_BY:
+    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
+    break;
+  case MachineCombinerPattern::MC_REASSOC_AX_YB:
+  case MachineCombinerPattern::MC_REASSOC_XA_YB:
+    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
+  }
+  assert(Prev && "Unknown pattern for machine combiner");
 
   reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
   return;
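All four MC_REASSOC_* patterns request the same dataflow change, sketched here as a scalar analogue (valid only under the unsafe-fp-math check at the top of getMachineCombinerPatterns()); the AX/XA and BY/YB suffixes appear to record which operand of Prev and Root carries the dependence chain, i.e. which operands must be commuted before rewriting:

    float serial(float A, float X, float Y) {
      float B = A + X; // Prev
      return B + Y;    // Root: the second add waits on the first
    }

    float reassociated(float A, float X, float Y) {
      float T = X + Y; // independent of A; can issue while A is in flight
      return A + T;    // the chain through A shrinks by one add
    }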
Index: test/CodeGen/X86/fp-fast.ll
===================================================================
--- test/CodeGen/X86/fp-fast.ll
+++ test/CodeGen/X86/fp-fast.ll
@@ -114,81 +114,3 @@
   ret float %t2
 }
 
-; Verify that the first two adds are independent regardless of how the inputs are
-; commuted. The destination registers are used as source registers for the third add.
-
-define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
-; CHECK-LABEL: reassociate_adds1:
-; CHECK: # BB#0:
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
-  %t0 = fadd float %x0, %x1
-  %t1 = fadd float %t0, %x2
-  %t2 = fadd float %t1, %x3
-  ret float %t2
-}
-
-define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
-; CHECK-LABEL: reassociate_adds2:
-; CHECK: # BB#0:
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
-  %t0 = fadd float %x0, %x1
-  %t1 = fadd float %x2, %t0
-  %t2 = fadd float %t1, %x3
-  ret float %t2
-}
-
-define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
-; CHECK-LABEL: reassociate_adds3:
-; CHECK: # BB#0:
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
-  %t0 = fadd float %x0, %x1
-  %t1 = fadd float %t0, %x2
-  %t2 = fadd float %x3, %t1
-  ret float %t2
-}
-
-define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
-; CHECK-LABEL: reassociate_adds4:
-; CHECK: # BB#0:
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
-  %t0 = fadd float %x0, %x1
-  %t1 = fadd float %x2, %t0
-  %t2 = fadd float %x3, %t1
-  ret float %t2
-}
-
-; Verify that we reassociate some of these ops. The optimal balanced tree of adds is not
-; produced because that would cost more compile time.
-
-define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
-; CHECK-LABEL: reassociate_adds5:
-; CHECK: # BB#0:
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddss %xmm5, %xmm4, %xmm1
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddss %xmm7, %xmm6, %xmm1
-; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
-  %t0 = fadd float %x0, %x1
-  %t1 = fadd float %t0, %x2
-  %t2 = fadd float %t1, %x3
-  %t3 = fadd float %t2, %x4
-  %t4 = fadd float %t3, %x5
-  %t5 = fadd float %t4, %x6
-  %t6 = fadd float %t5, %x7
-  ret float %t6
-}
Index: test/CodeGen/X86/machine-combiner.ll
===================================================================
--- test/CodeGen/X86/machine-combiner.ll
+++ test/CodeGen/X86/machine-combiner.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx -enable-unsafe-fp-math < %s | FileCheck %s
+
+; Verify that the first two adds are independent regardless of how the inputs are
+; commuted. The destination registers are used as source registers for the third add.
+
+define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: retq
+  %t0 = fadd float %x0, %x1
+  %t1 = fadd float %t0, %x2
+  %t2 = fadd float %t1, %x3
+  ret float %t2
+}
+
+define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: retq
+  %t0 = fadd float %x0, %x1
+  %t1 = fadd float %x2, %t0
+  %t2 = fadd float %t1, %x3
+  ret float %t2
+}
+
+define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds3:
+; CHECK: # BB#0:
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: retq
+  %t0 = fadd float %x0, %x1
+  %t1 = fadd float %t0, %x2
+  %t2 = fadd float %x3, %t1
+  ret float %t2
+}
+
+define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds4:
+; CHECK: # BB#0:
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: retq
+  %t0 = fadd float %x0, %x1
+  %t1 = fadd float %x2, %t0
+  %t2 = fadd float %x3, %t1
+  ret float %t2
+}
+
+; Verify that we reassociate some of these ops. The optimal balanced tree of adds is not
+; produced because that would cost more compile time.
+
+define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
+; CHECK-LABEL: reassociate_adds5:
+; CHECK: # BB#0:
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm5, %xmm4, %xmm1
+; CHECK-NEXT: vaddss %xmm6, %xmm1, %xmm1
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm7, %xmm0, %xmm0
+; CHECK-NEXT: retq
+  %t0 = fadd float %x0, %x1
+  %t1 = fadd float %t0, %x2
+  %t2 = fadd float %t1, %x3
+  %t3 = fadd float %t2, %x4
+  %t4 = fadd float %t3, %x5
+  %t5 = fadd float %t4, %x6
+  %t6 = fadd float %t5, %x7
+  ret float %t6
+}
+
+; Verify that we only need two associative operations to reassociate the operands.
+; Also, we should reassociate such that the result of the high latency division
+; is used by the final 'add' rather than reassociating the %x3 operand with the
+; division. The latter reassociation would not improve anything.
+
+define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds6:
+; CHECK: # BB#0:
+; CHECK-NEXT: vdivss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: retq
+  %t0 = fdiv float %x0, %x1
+  %t1 = fadd float %x2, %t0
+  %t2 = fadd float %x3, %t1
+  ret float %t2
+}
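The reassociate_adds6 expectation can be read through a scalar analogue (a hypothetical function, for illustration only): keeping the divide's result at the end of the chain means only one add waits on the high-latency division.

    float adds6_analogue(float x0, float x1, float x2, float x3) {
      float t0  = x0 / x1; // vdivss: long latency
      float t12 = x2 + x3; // independent add overlaps with the divide
      return t0 + t12;     // a single add depends on the divide result
    }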