diff --git a/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h b/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h --- a/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h +++ b/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h @@ -189,6 +189,7 @@ /// definition is found, recursively search the predecessor blocks for them. void getLiveOuts(MachineBasicBlock *MBB, int PhysReg, InstSet &Defs, BlockSet &VisitedBBs) const; + void getLiveOuts(MachineBasicBlock *MBB, int PhysReg, InstSet &Defs) const; /// For the given block, collect the instructions that use the live-in /// value of the provided register. Return whether the value is still diff --git a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp --- a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp +++ b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp @@ -389,6 +389,12 @@ } } +void ReachingDefAnalysis::getLiveOuts(MachineBasicBlock *MBB, int PhysReg, + InstSet &Defs) const { + SmallPtrSet<MachineBasicBlock*, 2> VisitedBBs; + getLiveOuts(MBB, PhysReg, Defs, VisitedBBs); +} + void ReachingDefAnalysis::getLiveOuts(MachineBasicBlock *MBB, int PhysReg, InstSet &Defs, BlockSet &VisitedBBs) const { diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h @@ -679,7 +679,6 @@ static inline bool isMovRegOpcode(int Opc) { return Opc == ARM::MOVr || Opc == ARM::tMOVr || Opc == ARM::t2MOVr; } - /// isValidCoprocessorNumber - decide whether an explicit coprocessor /// number is legal in generic instructions like CDP. The answer can /// vary with the subtarget. 
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp --- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp +++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp @@ -201,12 +201,25 @@ PredicatedMI *getDivergent() const { return Divergent; } }; + struct Reduction { + MachineInstr *Init; + MachineInstr &Copy; + MachineInstr &Reduce; + MachineInstr &VPSEL; + + Reduction(MachineInstr *Init, MachineInstr *Mov, MachineInstr *Add, + MachineInstr *Sel) + : Init(Init), Copy(*Mov), Reduce(*Add), VPSEL(*Sel) { } + }; + struct LowOverheadLoop { MachineLoop &ML; + MachineBasicBlock *Preheader = nullptr; MachineLoopInfo &MLI; ReachingDefAnalysis &RDA; const TargetRegisterInfo &TRI; + const ARMBaseInstrInfo &TII; MachineFunction *MF = nullptr; MachineInstr *InsertPt = nullptr; MachineInstr *Start = nullptr; @@ -218,14 +231,20 @@ SetVector<MachineInstr *> CurrentPredicate; SmallVector<VPTBlock, 4> VPTBlocks; SmallPtrSet<MachineInstr *, 4> ToRemove; + SmallVector<std::unique_ptr<Reduction>, 1> Reductions; SmallPtrSet<MachineInstr *, 4> BlockMasksToRecompute; bool Revert = false; bool CannotTailPredicate = false; LowOverheadLoop(MachineLoop &ML, MachineLoopInfo &MLI, - ReachingDefAnalysis &RDA, const TargetRegisterInfo &TRI) - : ML(ML), MLI(MLI), RDA(RDA), TRI(TRI) { + ReachingDefAnalysis &RDA, const TargetRegisterInfo &TRI, + const ARMBaseInstrInfo &TII) + : ML(ML), MLI(MLI), RDA(RDA), TRI(TRI), TII(TII) { MF = ML.getHeader()->getParent(); + if (auto *MBB = ML.getLoopPreheader()) + Preheader = MBB; + else if (auto *MBB = MLI.findLoopPreheader(&ML, true)) + Preheader = MBB; } // If this is an MVE instruction, check that we know how to use tail @@ -249,9 +268,13 @@ // of elements to the loop start instruction. bool ValidateTailPredicate(MachineInstr *StartInsertPt); + // See whether the live-out instructions are a reduction that we can fixup + // later. + bool FindValidReduction(InstSet &LiveMIs, InstSet &LiveOutUsers); + // Check that any values available outside of the loop will be the same // after tail predication conversion. 
- bool ValidateLiveOuts() const; + bool ValidateLiveOuts(); // Is it safe to define LR with DLS/WLS? // LR can be defined if it is the operand to start, because it's the same @@ -341,6 +364,8 @@ void ConvertVPTBlocks(LowOverheadLoop &LoLoop); + void FixupReductions(LowOverheadLoop &LoLoop) const; + MachineInstr *ExpandLoopStart(LowOverheadLoop &LoLoop); void Expand(LowOverheadLoop &LoLoop); @@ -481,7 +506,7 @@ }; // First, find the block that looks like the preheader. - MachineBasicBlock *MBB = MLI.findLoopPreheader(&ML, true); + MachineBasicBlock *MBB = Preheader; if (!MBB) { LLVM_DEBUG(dbgs() << "ARM Loops: Didn't find preheader.\n"); return false; @@ -626,7 +651,109 @@ return true; } -bool LowOverheadLoop::ValidateLiveOuts() const { +bool +LowOverheadLoop::FindValidReduction(InstSet &LiveMIs, InstSet &LiveOutUsers) { + // Also check for reductions where the operation needs to be merging values + // from the last and previous loop iterations. This means an instruction + // producing a value and a vmov storing the value calculated in the previous + // iteration. So we can have two live-out regs, one produced by a vmov and + // both being consumed by a vpsel. + LLVM_DEBUG(dbgs() << "ARM Loops: Looking for reduction live-outs:\n"; + for (auto *MI : LiveMIs) + dbgs() << " - " << *MI); + + if (!Preheader) + return false; + + // Expect a vmov, a vadd and a single vpsel user. + // TODO: This means we can't currently support multiple reductions in the + // loop. 
+ if (LiveMIs.size() != 2 || LiveOutUsers.size() != 1) + return false; + + MachineInstr *VPSEL = *LiveOutUsers.begin(); + if (VPSEL->getOpcode() != ARM::MVE_VPSEL) + return false; + + unsigned VPRIdx = llvm::findFirstVPTPredOperandIdx(*VPSEL) + 1; + MachineInstr *Pred = RDA.getMIOperand(VPSEL, VPRIdx); + if (!Pred || Pred != VCTP) { + LLVM_DEBUG(dbgs() << "ARM Loops: Not using equivalent predicate.\n"); + return false; + } + + MachineInstr *Reduce = RDA.getMIOperand(VPSEL, 1); + if (!Reduce) + return false; + + assert(LiveMIs.count(Reduce) && "Expected MI to be live-out"); + + // TODO: Support more operations than VADD. + switch (VCTP->getOpcode()) { + default: + return false; + case ARM::MVE_VCTP8: + if (Reduce->getOpcode() != ARM::MVE_VADDi8) + return false; + break; + case ARM::MVE_VCTP16: + if (Reduce->getOpcode() != ARM::MVE_VADDi16) + return false; + break; + case ARM::MVE_VCTP32: + if (Reduce->getOpcode() != ARM::MVE_VADDi32) + return false; + break; + } + + // Test that the reduce op is overwriting one of its operands. + if (Reduce->getOperand(0).getReg() != Reduce->getOperand(1).getReg() && + Reduce->getOperand(0).getReg() != Reduce->getOperand(2).getReg()) { + LLVM_DEBUG(dbgs() << "ARM Loops: Reducing op isn't overwriting itself.\n"); + return false; + } + + // Check that the VORR is actually a VMOV. + MachineInstr *Copy = RDA.getMIOperand(VPSEL, 2); + if (!Copy || Copy->getOpcode() != ARM::MVE_VORR || + !Copy->getOperand(1).isReg() || !Copy->getOperand(2).isReg() || + Copy->getOperand(1).getReg() != Copy->getOperand(2).getReg()) + return false; + + assert(LiveMIs.count(Copy) && "Expected MI to be live-out"); + + // Check that the vadd and vmov are only used by each other and the vpsel. 
+ SmallPtrSet<MachineInstr*, 2> CopyUsers; + RDA.getGlobalUses(Copy, Copy->getOperand(0).getReg(), CopyUsers); + if (CopyUsers.size() > 2 || !CopyUsers.count(Reduce)) { + LLVM_DEBUG(dbgs() << "ARM Loops: Copy users unsupported.\n"); + return false; + } + + SmallPtrSet<MachineInstr*, 2> ReduceUsers; + RDA.getGlobalUses(Reduce, Reduce->getOperand(0).getReg(), ReduceUsers); + if (ReduceUsers.size() > 2 || !ReduceUsers.count(Copy)) { + LLVM_DEBUG(dbgs() << "ARM Loops: Reduce users unsupported.\n"); + return false; + } + + // Then find whether there's an instruction initialising the register that + // is storing the reduction. + SmallPtrSet<MachineInstr*, 2> Incoming; + RDA.getLiveOuts(Preheader, Copy->getOperand(1).getReg(), Incoming); + if (Incoming.size() > 1) + return false; + + MachineInstr *Init = Incoming.empty() ? nullptr : *Incoming.begin(); + LLVM_DEBUG(dbgs() << "ARM Loops: Found a reduction:\n" + << " - " << *Copy + << " - " << *Reduce + << " - " << *VPSEL); + Reductions.push_back(std::make_unique<Reduction>(Init, Copy, Reduce, VPSEL)); + return true; +} + +bool LowOverheadLoop::ValidateLiveOuts() { // We want to find out if the tail-predicated version of this loop will // produce the same values as the loop in its original form. For this to // be true, the newly inserted implicit predication must not change the @@ -652,9 +779,9 @@ SetVector<MachineInstr *> FalseLanesUnknown; SmallPtrSet<MachineInstr *, 4> FalseLanesZero; SmallPtrSet<MachineInstr *, 4> Predicated; - MachineBasicBlock *MBB = ML.getHeader(); + MachineBasicBlock *Header = ML.getHeader(); - for (auto &MI : *MBB) { + for (auto &MI : *Header) { const MCInstrDesc &MCID = MI.getDesc(); uint64_t Flags = MCID.TSFlags; if ((Flags & ARMII::DomainMask) != ARMII::DomainMVE) @@ -702,6 +829,9 @@ // stored and then we can work towards the leaves, hopefully adding more // instructions to Predicated. Successfully terminating the loop means that // all the unknown values have to found to be masked by predicated user(s). 
+ // For any unpredicated values, we store them in NonPredicated so that we + // can later check whether these form a reduction. + SmallPtrSet NonPredicated; for (auto *MI : reverse(FalseLanesUnknown)) { for (auto &MO : MI->operands()) { if (!isRegInClass(MO, QPRs) || !MO.isDef()) @@ -709,39 +839,48 @@ if (!HasPredicatedUsers(MI, MO, Predicated)) { LLVM_DEBUG(dbgs() << "ARM Loops: Found an unknown def of : " << TRI.getRegAsmName(MO.getReg()) << " at " << *MI); - return false; + NonPredicated.insert(MI); + continue; } } // Any unknown false lanes have been masked away by the user(s). Predicated.insert(MI); } - // Collect Q-regs that are live in the exit blocks. We don't collect scalars - // because they won't be affected by lane predication. - SmallSet LiveOuts; + SmallPtrSet LiveOutMIs; + SmallPtrSet LiveOutUsers; SmallVector ExitBlocks; ML.getExitBlocks(ExitBlocks); - for (auto *MBB : ExitBlocks) - for (const MachineBasicBlock::RegisterMaskPair &RegMask : MBB->liveins()) - if (QPRs->contains(RegMask.PhysReg)) - LiveOuts.insert(RegMask.PhysReg); - - // Collect the instructions in the loop body that define the live-out values. - SmallPtrSet LiveMIs; assert(ML.getNumBlocks() == 1 && "Expected single block loop!"); - for (auto Reg : LiveOuts) - if (auto *MI = RDA.getLocalLiveOutMIDef(MBB, Reg)) - LiveMIs.insert(MI); + assert(ExitBlocks.size() == 1 && "Expected a single exit block"); + MachineBasicBlock *ExitBB = ExitBlocks.front(); + for (const MachineBasicBlock::RegisterMaskPair &RegMask : ExitBB->liveins()) { + // Check Q-regs that are live in the exit blocks. We don't collect scalars + // because they won't be affected by lane predication. + if (QPRs->contains(RegMask.PhysReg)) { + if (auto *MI = RDA.getLocalLiveOutMIDef(Header, RegMask.PhysReg)) + LiveOutMIs.insert(MI); + RDA.getLiveInUses(ExitBB, RegMask.PhysReg, LiveOutUsers); + } + } + + // If we have any non-predicated live-outs, they need to be part of a + // reduction that we can fixup later. 
The reduction takes the form of an + // operation that uses its previous values through a vmov, and then a vpsel + // resides in the exit blocks to select the final bytes from n and n-1 + // iterations. + if (!NonPredicated.empty() && + !FindValidReduction(NonPredicated, LiveOutUsers)) + return false; - LLVM_DEBUG(dbgs() << "ARM Loops: Found loop live-outs:\n"; - for (auto *MI : LiveMIs) - dbgs() << " - " << *MI); // We've already validated that any VPT predication within the loop will be // equivalent when we perform the predication transformation; so we know that // any VPT predicated instruction is predicated upon VCTP. Any live-out - // instruction needs to be predicated, so check this here. + // instruction needs to be predicated, so check this here. The instructions + // in NonPredicated have been found to form a reduction whose legality + // we can ensure. + for (auto *MI : LiveOutMIs) + if (!isVectorPredicated(MI) && !NonPredicated.count(MI)) + return false; return true; @@ -949,14 +1088,12 @@ return nullptr; }; - LowOverheadLoop LoLoop(*ML, *MLI, *RDA, *TRI); + LowOverheadLoop LoLoop(*ML, *MLI, *RDA, *TRI, *TII); // Search the preheader for the start intrinsic. // FIXME: I don't see why we shouldn't be supporting multiple predecessors // with potentially multiple set.loop.iterations, so we need to enable this. 
- if (auto *Preheader = ML->getLoopPreheader()) - LoLoop.Start = SearchForStart(Preheader); - else if (auto *Preheader = MLI->findLoopPreheader(ML, true)) - LoLoop.Start = SearchForStart(Preheader); + if (LoLoop.Preheader) + LoLoop.Start = SearchForStart(LoLoop.Preheader); else return false; @@ -1210,6 +1347,61 @@ return &*MIB; } +void ARMLowOverheadLoops::FixupReductions(LowOverheadLoop &LoLoop) const { + LLVM_DEBUG(dbgs() << "ARM Loops: Fixing up reduction(s).\n"); + auto BuildMov = [this](MachineInstr &InsertPt, Register To, Register From) { + MachineBasicBlock *MBB = InsertPt.getParent(); + MachineInstrBuilder MIB = + BuildMI(*MBB, &InsertPt, InsertPt.getDebugLoc(), TII->get(ARM::MVE_VORR)); + MIB.addDef(To); + MIB.addReg(From); + MIB.addReg(From); + MIB.addImm(0); + MIB.addReg(0); + MIB.addReg(To); + LLVM_DEBUG(dbgs() << "ARM Loops: Inserted VMOV: " << *MIB); + }; + + for (auto &Reduction : LoLoop.Reductions) { + MachineInstr &Copy = Reduction->Copy; + MachineInstr &Reduce = Reduction->Reduce; + Register DestReg = Copy.getOperand(0).getReg(); + + // Change the initialiser if present + if (Reduction->Init) { + MachineInstr *Init = Reduction->Init; + + for (unsigned i = 0; i < Init->getNumOperands(); ++i) { + MachineOperand &MO = Init->getOperand(i); + if (MO.isReg() && MO.isUse() && MO.isTied() && + Init->findTiedOperandIdx(i) == 0) + Init->getOperand(i).setReg(DestReg); + } + Init->getOperand(0).setReg(DestReg); + LLVM_DEBUG(dbgs() << "ARM Loops: Changed init regs: " << *Init); + } else + BuildMov(LoLoop.Preheader->instr_back(), DestReg, Copy.getOperand(1).getReg()); + + // Change the reducing op to write to the register that is used to copy + // its value on the next iteration. Also update the tied-def operand. + Reduce.getOperand(0).setReg(DestReg); + Reduce.getOperand(5).setReg(DestReg); + LLVM_DEBUG(dbgs() << "ARM Loops: Changed reduction regs: " << Reduce); + + // Instead of a vpsel, just copy the register into the necessary one. 
+ MachineInstr &VPSEL = Reduction->VPSEL; + if (VPSEL.getOperand(0).getReg() != DestReg) + BuildMov(VPSEL, VPSEL.getOperand(0).getReg(), DestReg); + + // Remove the unnecessary instructions. + LLVM_DEBUG(dbgs() << "ARM Loops: Removing:\n" + << " - " << Copy + << " - " << VPSEL << "\n"); + Copy.eraseFromParent(); + VPSEL.eraseFromParent(); + } +} + void ARMLowOverheadLoops::ConvertVPTBlocks(LowOverheadLoop &LoLoop) { auto RemovePredicate = [](MachineInstr *MI) { LLVM_DEBUG(dbgs() << "ARM Loops: Removing predicate from: " << *MI); @@ -1363,8 +1555,10 @@ RemoveDeadBranch(LoLoop.Start); LoLoop.End = ExpandLoopEnd(LoLoop); RemoveDeadBranch(LoLoop.End); - if (LoLoop.IsTailPredicationLegal()) + if (LoLoop.IsTailPredicationLegal()) { ConvertVPTBlocks(LoLoop); + FixupReductions(LoLoop); + } for (auto *I : LoLoop.ToRemove) { LLVM_DEBUG(dbgs() << "ARM Loops: Erasing " << *I); I->eraseFromParent(); diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll --- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll @@ -152,26 +152,16 @@ ; CHECK-NEXT: uxtbeq r0, r0 ; CHECK-NEXT: bxeq lr ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: add.w r3, r2, #15 -; CHECK-NEXT: vmov.i32 q1, #0x0 -; CHECK-NEXT: bic r3, r3, #15 -; CHECK-NEXT: sub.w r12, r3, #16 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: add.w lr, r3, r12, lsr #4 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: vmov.i32 q0, #0x0 +; CHECK-NEXT: dlstp.8 lr, r2 ; CHECK-NEXT: .LBB2_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.8 r2 -; CHECK-NEXT: vmov q0, q1 -; CHECK-NEXT: vpstt -; CHECK-NEXT: vldrbt.u8 q1, [r1], #16 -; CHECK-NEXT: vldrbt.u8 q2, [r0], #16 -; CHECK-NEXT: subs r2, #16 +; CHECK-NEXT: vldrb.u8 q1, [r1], #16 +; CHECK-NEXT: vldrb.u8 q2, [r0], #16 ; CHECK-NEXT: vsub.i8 q1, q2, q1 -; CHECK-NEXT: vadd.i8 q1, q1, q0 -; CHECK-NEXT: le lr, .LBB2_1 +; CHECK-NEXT: 
vadd.i8 q0, q1, q0 +; CHECK-NEXT: letp lr, .LBB2_1 ; CHECK-NEXT: @ %bb.2: @ %middle.block -; CHECK-NEXT: vpsel q0, q1, q0 ; CHECK-NEXT: vaddv.u8 r0, q0 ; CHECK-NEXT: pop.w {r7, lr} ; CHECK-NEXT: uxtb r0, r0 @@ -221,26 +211,16 @@ ; CHECK-NEXT: sxtheq r0, r0 ; CHECK-NEXT: bxeq lr ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: adds r3, r2, #7 -; CHECK-NEXT: vmov.i32 q1, #0x0 -; CHECK-NEXT: bic r3, r3, #7 -; CHECK-NEXT: sub.w r12, r3, #8 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: add.w lr, r3, r12, lsr #3 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: vmov.i32 q0, #0x0 +; CHECK-NEXT: dlstp.16 lr, r2 ; CHECK-NEXT: .LBB3_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.16 r2 -; CHECK-NEXT: vmov q0, q1 -; CHECK-NEXT: vpstt -; CHECK-NEXT: vldrbt.u16 q1, [r0], #8 -; CHECK-NEXT: vldrbt.u16 q2, [r1], #8 -; CHECK-NEXT: subs r2, #8 +; CHECK-NEXT: vldrb.u16 q1, [r0], #8 +; CHECK-NEXT: vldrb.u16 q2, [r1], #8 ; CHECK-NEXT: vsub.i16 q1, q2, q1 -; CHECK-NEXT: vadd.i16 q1, q1, q0 -; CHECK-NEXT: le lr, .LBB3_1 +; CHECK-NEXT: vadd.i16 q0, q1, q0 +; CHECK-NEXT: letp lr, .LBB3_1 ; CHECK-NEXT: @ %bb.2: @ %middle.block -; CHECK-NEXT: vpsel q0, q1, q0 ; CHECK-NEXT: vaddv.u16 r0, q0 ; CHECK-NEXT: pop.w {r7, lr} ; CHECK-NEXT: sxth r0, r0 @@ -292,26 +272,16 @@ ; CHECK-NEXT: uxtbeq r0, r0 ; CHECK-NEXT: bxeq lr ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: add.w r3, r2, #15 -; CHECK-NEXT: vmov.i32 q1, #0x0 -; CHECK-NEXT: bic r3, r3, #15 -; CHECK-NEXT: sub.w r12, r3, #16 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: add.w lr, r3, r12, lsr #4 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: vmov.i32 q0, #0x0 +; CHECK-NEXT: dlstp.8 lr, r2 ; CHECK-NEXT: .LBB4_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.8 r2 -; CHECK-NEXT: vmov q0, q1 -; CHECK-NEXT: vpstt -; CHECK-NEXT: vldrbt.u8 q1, [r0], #16 -; CHECK-NEXT: vldrbt.u8 q2, [r1], #16 -; CHECK-NEXT: subs r2, #16 +; CHECK-NEXT: vldrb.u8 q1, [r0], #16 +; CHECK-NEXT: vldrb.u8 q2, [r1], #16 ; 
CHECK-NEXT: vmul.i8 q1, q2, q1 -; CHECK-NEXT: vadd.i8 q1, q1, q0 -; CHECK-NEXT: le lr, .LBB4_1 +; CHECK-NEXT: vadd.i8 q0, q1, q0 +; CHECK-NEXT: letp lr, .LBB4_1 ; CHECK-NEXT: @ %bb.2: @ %middle.block -; CHECK-NEXT: vpsel q0, q1, q0 ; CHECK-NEXT: vaddv.u8 r0, q0 ; CHECK-NEXT: pop.w {r7, lr} ; CHECK-NEXT: uxtb r0, r0 @@ -361,26 +331,16 @@ ; CHECK-NEXT: sxtheq r0, r0 ; CHECK-NEXT: bxeq lr ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: adds r3, r2, #7 -; CHECK-NEXT: vmov.i32 q1, #0x0 -; CHECK-NEXT: bic r3, r3, #7 -; CHECK-NEXT: sub.w r12, r3, #8 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: add.w lr, r3, r12, lsr #3 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: vmov.i32 q0, #0x0 +; CHECK-NEXT: dlstp.16 lr, r2 ; CHECK-NEXT: .LBB5_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.16 r2 -; CHECK-NEXT: vmov q0, q1 -; CHECK-NEXT: vpstt -; CHECK-NEXT: vldrbt.u16 q1, [r0], #8 -; CHECK-NEXT: vldrbt.u16 q2, [r1], #8 -; CHECK-NEXT: subs r2, #8 +; CHECK-NEXT: vldrb.u16 q1, [r0], #8 +; CHECK-NEXT: vldrb.u16 q2, [r1], #8 ; CHECK-NEXT: vmul.i16 q1, q2, q1 -; CHECK-NEXT: vadd.i16 q1, q1, q0 -; CHECK-NEXT: le lr, .LBB5_1 +; CHECK-NEXT: vadd.i16 q0, q1, q0 +; CHECK-NEXT: letp lr, .LBB5_1 ; CHECK-NEXT: @ %bb.2: @ %middle.block -; CHECK-NEXT: vpsel q0, q1, q0 ; CHECK-NEXT: vaddv.u16 r0, q0 ; CHECK-NEXT: pop.w {r7, lr} ; CHECK-NEXT: sxth r0, r0 @@ -456,25 +416,19 @@ ; CHECK-NEXT: vaddv.u32 r12, q0 ; CHECK-NEXT: cbz r2, .LBB6_7 ; CHECK-NEXT: @ %bb.4: @ %vector.ph47 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: add.w lr, r3, r6, lsr #2 ; CHECK-NEXT: movs r3, #0 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: dlstp.32 lr, r2 ; CHECK-NEXT: vdup.32 q0, r3 -; CHECK-NEXT: vmov.32 q0[0], r12 +; CHECK-NEXT: vmov.32 q1[0], r12 ; CHECK-NEXT: .LBB6_5: @ %vector.body46 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.32 r2 -; CHECK-NEXT: vmov q1, q0 -; CHECK-NEXT: vpstt -; CHECK-NEXT: vldrbt.u32 q0, [r0], #4 -; CHECK-NEXT: vldrbt.u32 q2, [r1], #4 -; CHECK-NEXT: subs 
r2, #4 +; CHECK-NEXT: vldrb.u32 q0, [r0], #4 +; CHECK-NEXT: vldrb.u32 q2, [r1], #4 ; CHECK-NEXT: vmul.i32 q0, q2, q0 -; CHECK-NEXT: vadd.i32 q0, q0, q1 -; CHECK-NEXT: le lr, .LBB6_5 +; CHECK-NEXT: vadd.i32 q1, q0, q1 +; CHECK-NEXT: letp lr, .LBB6_5 ; CHECK-NEXT: @ %bb.6: @ %middle.block44 -; CHECK-NEXT: vpsel q0, q0, q1 +; CHECK-NEXT: vmov q0, q1 ; CHECK-NEXT: vaddv.u32 r12, q0 ; CHECK-NEXT: .LBB6_7: @ %for.cond.cleanup7 ; CHECK-NEXT: mov r0, r12 diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll --- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll @@ -9,28 +9,19 @@ ; CHECK-NEXT: moveq r0, #0 ; CHECK-NEXT: bxeq lr ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: adds r3, r2, #3 -; CHECK-NEXT: vmov.i32 q0, #0x0 -; CHECK-NEXT: bic r3, r3, #3 -; CHECK-NEXT: sub.w r12, r3, #4 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: add.w lr, r3, r12, lsr #2 +; CHECK-NEXT: vmov.i32 q1, #0x0 ; CHECK-NEXT: movs r3, #0 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: dlstp.32 lr, r2 ; CHECK-NEXT: .LBB0_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.32 r2 -; CHECK-NEXT: vmov q1, q0 -; CHECK-NEXT: vpstt -; CHECK-NEXT: vldrwt.u32 q0, [r0], #16 -; CHECK-NEXT: vldrwt.u32 q2, [r1], #16 +; CHECK-NEXT: vldrw.u32 q0, [r0], #16 +; CHECK-NEXT: vldrw.u32 q2, [r1], #16 ; CHECK-NEXT: adds r3, #4 ; CHECK-NEXT: vmul.i32 q0, q2, q0 -; CHECK-NEXT: subs r2, #4 -; CHECK-NEXT: vadd.i32 q0, q0, q1 -; CHECK-NEXT: le lr, .LBB0_1 +; CHECK-NEXT: vadd.i32 q1, q0, q1 +; CHECK-NEXT: letp lr, .LBB0_1 ; CHECK-NEXT: @ %bb.2: @ %middle.block -; CHECK-NEXT: vpsel q0, q0, q1 +; CHECK-NEXT: vmov q0, q1 ; CHECK-NEXT: vaddv.u32 r0, q0 ; CHECK-NEXT: pop {r7, pc} entry: @@ -85,26 +76,17 @@ ; CHECK-NEXT: moveq r0, #0 ; CHECK-NEXT: bxeq lr ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: adds r1, r2, #3 -; CHECK-NEXT: movs 
r3, #1 -; CHECK-NEXT: bic r1, r1, #3 -; CHECK-NEXT: vmov.i32 q0, #0x0 -; CHECK-NEXT: subs r1, #4 -; CHECK-NEXT: add.w lr, r3, r1, lsr #2 +; CHECK-NEXT: vmov.i32 q1, #0x0 ; CHECK-NEXT: movs r1, #0 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: dlstp.32 lr, r2 ; CHECK-NEXT: .LBB1_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.32 r2 -; CHECK-NEXT: vmov q1, q0 -; CHECK-NEXT: vpst -; CHECK-NEXT: vldrwt.u32 q0, [r0], #16 +; CHECK-NEXT: vldrw.u32 q0, [r0], #16 ; CHECK-NEXT: adds r1, #4 -; CHECK-NEXT: subs r2, #4 -; CHECK-NEXT: vadd.i32 q0, q0, q1 -; CHECK-NEXT: le lr, .LBB1_1 +; CHECK-NEXT: vadd.i32 q1, q0, q1 +; CHECK-NEXT: letp lr, .LBB1_1 ; CHECK-NEXT: @ %bb.2: @ %middle.block -; CHECK-NEXT: vpsel q0, q0, q1 +; CHECK-NEXT: vmov q0, q1 ; CHECK-NEXT: vaddv.u32 r0, q0 ; CHECK-NEXT: pop {r7, pc} entry: @@ -155,26 +137,17 @@ ; CHECK-NEXT: moveq r0, #0 ; CHECK-NEXT: bxeq lr ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: adds r1, r2, #3 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: bic r1, r1, #3 -; CHECK-NEXT: vmov.i32 q0, #0x0 -; CHECK-NEXT: subs r1, #4 -; CHECK-NEXT: add.w lr, r3, r1, lsr #2 +; CHECK-NEXT: vmov.i32 q1, #0x0 ; CHECK-NEXT: movs r1, #0 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: dlstp.32 lr, r2 ; CHECK-NEXT: .LBB2_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.32 r2 -; CHECK-NEXT: vmov q1, q0 -; CHECK-NEXT: vpst -; CHECK-NEXT: vldrwt.u32 q0, [r0], #16 +; CHECK-NEXT: vldrw.u32 q0, [r0], #16 ; CHECK-NEXT: adds r1, #4 -; CHECK-NEXT: subs r2, #4 -; CHECK-NEXT: vadd.i32 q0, q0, q1 -; CHECK-NEXT: le lr, .LBB2_1 +; CHECK-NEXT: vadd.i32 q1, q0, q1 +; CHECK-NEXT: letp lr, .LBB2_1 ; CHECK-NEXT: @ %bb.2: @ %middle.block -; CHECK-NEXT: vpsel q0, q0, q1 +; CHECK-NEXT: vmov q0, q1 ; CHECK-NEXT: vaddv.u32 r0, q0 ; CHECK-NEXT: pop {r7, pc} entry: