Index: llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -37,6 +37,7 @@
 #include "llvm/IR/Value.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Casting.h"
+#include "llvm/Transforms/Utils/Local.h"
 #include <algorithm>
 #include <cassert>
@@ -67,6 +68,7 @@
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.setPreservesCFG();
     AU.addRequired<TargetPassConfig>();
+    AU.addRequired<LoopInfoWrapperPass>();
     FunctionPass::getAnalysisUsage(AU);
   }
@@ -83,7 +85,7 @@
   // Compute the scale of this gather/scatter instruction
   int computeScale(unsigned GEPElemSize, unsigned MemoryElemSize);
 
-  bool lowerGather(IntrinsicInst *I);
+  Value *lowerGather(IntrinsicInst *I);
   // Create a gather from a base + vector of offsets
   Value *tryCreateMaskedGatherOffset(IntrinsicInst *I, Value *Ptr,
                                      Instruction *&Root, IRBuilder<> &Builder);
@@ -91,13 +93,22 @@
   Value *tryCreateMaskedGatherBase(IntrinsicInst *I, Value *Ptr,
                                    IRBuilder<> &Builder);
 
-  bool lowerScatter(IntrinsicInst *I);
+  Value *lowerScatter(IntrinsicInst *I);
   // Create a scatter to a base + vector of offsets
-  Value *tryCreateMaskedScatterOffset(IntrinsicInst *I, Value *Ptr,
+  Value *tryCreateMaskedScatterOffset(IntrinsicInst *I, Value *Offsets,
                                       IRBuilder<> &Builder);
   // Create a scatter to a vector of pointers
   Value *tryCreateMaskedScatterBase(IntrinsicInst *I, Value *Ptr,
                                     IRBuilder<> &Builder);
+
+  // Check whether these offsets could be moved out of the loop they're in
+  bool optimiseOffsets(Value *Offsets, BasicBlock *BB, LoopInfo *LI);
+  // Pushes the given add out of the loop
+  void pushOutAdd(PHINode *&Phi, Value *SecondOperand, unsigned StartIndex);
+  // Pushes the given mul out of the loop
+  void pushOutMul(PHINode *&Phi, Value *IncrementPerRound, Value *SecondOperand,
+                  unsigned LoopIncrement, IRBuilder<> &Builder);
+
 };
 
 } // end anonymous namespace
@@ -205,7 +216,7 @@
   return -1;
 }
 
-bool MVEGatherScatterLowering::lowerGather(IntrinsicInst *I) {
+Value *MVEGatherScatterLowering::lowerGather(IntrinsicInst *I) {
   using namespace PatternMatch;
   LLVM_DEBUG(dbgs() << "masked gathers: checking transform preconditions\n");
@@ -220,7 +231,7 @@
   if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(),
                                Ty->getScalarSizeInBits(), Alignment))
-    return false;
+    return nullptr;
   lookThroughBitcast(Ptr);
   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
@@ -233,7 +244,7 @@
   if (!Load)
     Load = tryCreateMaskedGatherBase(I, Ptr, Builder);
   if (!Load)
-    return false;
+    return nullptr;
 
   if (!isa<UndefValue>(PassThru) && !match(PassThru, m_Zero())) {
     LLVM_DEBUG(dbgs() << "masked gathers: found non-trivial passthru - "
@@ -247,12 +258,14 @@
   // If this was an extending gather, we need to get rid of the sext/zext
   // as well as of the gather itself
   I->eraseFromParent();
+
   LLVM_DEBUG(dbgs() << "masked gathers: successfully built masked gather\n");
-  return true;
+  return Load;
 }
 
-Value *MVEGatherScatterLowering::tryCreateMaskedGatherBase(
-    IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder) {
+Value *MVEGatherScatterLowering::tryCreateMaskedGatherBase(IntrinsicInst *I,
+                                                           Value *Ptr,
+                                                           IRBuilder<> &Builder) {
   using namespace PatternMatch;
   Type *Ty = I->getType();
   LLVM_DEBUG(dbgs() << "masked gathers: loading from vector of pointers\n");
@@ -287,7 +300,7 @@
   if (!I->hasOneUse())
     return nullptr;
 
-  // The correct root to replace is the not the CallInst itself, but the
+  // The correct root to replace is not the CallInst itself, but the
   // instruction which extends it
   Extend = cast<Instruction>(*I->users().begin());
   if (isa<SExtInst>(Extend)) {
@@ -334,7 +347,7 @@
     Builder.getInt32(Scale), Builder.getInt32(Unsigned)});
 }
 
-bool MVEGatherScatterLowering::lowerScatter(IntrinsicInst *I) {
+Value *MVEGatherScatterLowering::lowerScatter(IntrinsicInst *I) {
   using namespace PatternMatch;
   LLVM_DEBUG(dbgs() << "masked scatters: checking transform preconditions\n");
@@ -348,7 +361,7 @@
   if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(),
                                Ty->getScalarSizeInBits(), Alignment))
-    return false;
+    return nullptr;
   lookThroughBitcast(Ptr);
   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
@@ -360,12 +373,12 @@
   if (!Store)
     Store = tryCreateMaskedScatterBase(I, Ptr, Builder);
   if (!Store)
-    return false;
+    return nullptr;
 
   LLVM_DEBUG(dbgs() << "masked scatters: successfully built masked scatter\n");
   I->replaceAllUsesWith(Store);
   I->eraseFromParent();
-  return true;
+  return Store;
 }
 
 Value *MVEGatherScatterLowering::tryCreateMaskedScatterBase(
@@ -445,6 +458,268 @@
     Builder.getInt32(Scale)});
 }
 
+void MVEGatherScatterLowering::pushOutAdd(PHINode *&Phi, Value *SecondOperand,
+                                          unsigned StartIndex) {
+  LLVM_DEBUG(dbgs() << "masked gathers/scatters: optimising add instruction\n");
+  Instruction *InsertionPoint;
+  if (isa<Instruction>(SecondOperand))
+    InsertionPoint = &cast<Instruction>(SecondOperand)->getParent()->back();
+  else
+    InsertionPoint =
+        &cast<Instruction>(Phi->getIncomingBlock(StartIndex)->back());
+  // Initialize the phi with a vector that contains a sum of the constants
+  Instruction *NewIndex = BinaryOperator::Create(
+      Instruction::Add, Phi->getIncomingValue(StartIndex), SecondOperand,
+      "PushedOutAdd", InsertionPoint);
+  unsigned IncrementIndex = StartIndex == 0 ? 1 : 0;
+
+  // Order such that start index comes first (this reduces mov's)
+  Phi->addIncoming(NewIndex, Phi->getIncomingBlock(StartIndex));
+  Phi->addIncoming(Phi->getIncomingValue(IncrementIndex),
+                   Phi->getIncomingBlock(IncrementIndex));
+  Phi->removeIncomingValue(IncrementIndex);
+  Phi->removeIncomingValue(StartIndex);
+}
+
+void MVEGatherScatterLowering::pushOutMul(PHINode *&Phi,
+                                          Value *IncrementPerRound,
+                                          Value *SecondOperand,
+                                          unsigned LoopIncrement,
+                                          IRBuilder<> &Builder) {
+  LLVM_DEBUG(dbgs() << "masked gathers/scatters: optimising mul instruction\n");
+
+  // Create a new scalar add outside of the loop and transform it to a splat
+  // by which loop variable can be incremented
+  Instruction *InsertionPoint;
+  if (isa<Instruction>(SecondOperand))
+    InsertionPoint = &cast<Instruction>(SecondOperand)->getParent()->back();
+  else
+    InsertionPoint = &cast<Instruction>(
+        Phi->getIncomingBlock(LoopIncrement == 1 ? 0 : 1)->back());
+
+  // Create a new index
+  Value *StartIndex = BinaryOperator::Create(
+      Instruction::Mul, Phi->getIncomingValue(LoopIncrement == 1 ? 0 : 1),
+      SecondOperand, "PushedOutMul", InsertionPoint);
+
+  Instruction *NewIncrement;
+  // Use information about constants known at compile time to save registers
+  if (isa<ConstantDataVector>(IncrementPerRound) &&
+      cast<ConstantDataVector>(IncrementPerRound)->isSplat() &&
+      isa<ConstantDataVector>(SecondOperand) &&
+      cast<ConstantDataVector>(SecondOperand)->isSplat()) {
+    int Product = cast<ConstantDataVector>(IncrementPerRound)
+                      ->getUniqueInteger()
+                      .getZExtValue() *
+                  cast<ConstantDataVector>(SecondOperand)
+                      ->getUniqueInteger()
+                      .getZExtValue();
+    // Increment NewIndex by Product instead of doing the multiplication
+    Value *ProductVal =
+        Builder.getIntN(Phi->getType()->getScalarSizeInBits(), Product);
+    NewIncrement = BinaryOperator::Create(
+        Instruction::Add, Phi,
+        Builder.CreateVectorSplat(
+            IncrementPerRound->getType()->getVectorNumElements(), ProductVal),
+        "IncrementPushedOutMul",
+        cast<Instruction>(Phi->getIncomingBlock(LoopIncrement)->back())
+            .getPrevNode());
+  } else {
+    // No constants, solve the task at run time
+    Instruction *Product =
+        BinaryOperator::Create(Instruction::Mul, IncrementPerRound,
+                               SecondOperand, "Product", InsertionPoint);
+
+    // Increment NewIndex by Product instead of the multiplication
+    NewIncrement = BinaryOperator::Create(
+        Instruction::Add, Phi, Product, "IncrementPushedOutMul",
+        cast<Instruction>(Phi->getIncomingBlock(LoopIncrement)->back())
+            .getPrevNode());
+  }
+
+  Phi->addIncoming(StartIndex,
+                   Phi->getIncomingBlock(LoopIncrement == 1 ? 0 : 1));
+  Phi->addIncoming(NewIncrement, Phi->getIncomingBlock(LoopIncrement));
+  Phi->removeIncomingValue((unsigned)0);
+  Phi->removeIncomingValue((unsigned)0);
+  return;
+}
+
+bool AllGatScatUsers(Instruction *I) {
+  bool gatscat = true;
+  if (I->hasNUses(0)) {
+    return false;
+  }
+  for (User *U : I->users()) {
+    unsigned IntrinsicNum =
+        isa<IntrinsicInst>(U) ? cast<IntrinsicInst>(U)->getIntrinsicID() : 0;
+    unsigned OpCode =
+        isa<Instruction>(U) ? cast<Instruction>(U)->getOpcode() : 0;
+    if (isa<GetElementPtrInst>(U) ||
+        (IntrinsicNum == Intrinsic::masked_gather ||
+         (IntrinsicNum >= Intrinsic::arm_mve_vldr_gather_base &&
+          IntrinsicNum <= Intrinsic::arm_mve_vldr_gather_offset_predicated) ||
+         IntrinsicNum == Intrinsic::masked_scatter ||
+         (IntrinsicNum >= Intrinsic::arm_mve_vstr_scatter_base &&
+          IntrinsicNum <= Intrinsic::arm_mve_vstr_scatter_offset_predicated))) {
+      return gatscat;
+    } else if (OpCode != 0) {
+      if (OpCode == Instruction::Add || OpCode == Instruction::Mul) {
+        gatscat &= AllGatScatUsers(cast<Instruction>(U));
+      } else {
+        gatscat = false;
+      }
+    }
+  }
+  return gatscat;
+}
+
+bool MVEGatherScatterLowering::optimiseOffsets(Value *Offsets, BasicBlock *BB,
+                                               LoopInfo *LI) {
+  LLVM_DEBUG(dbgs() << "masked gathers/scatters: trying to optimize\n");
+  // Optimise the addresses of gathers/scatters by moving invariant
+  // calculations out of the loop
+  if (!isa<Instruction>(Offsets))
+    return false;
+  Instruction *Offs = cast<Instruction>(Offsets);
+  if (Offs->getOpcode() != Instruction::Add &&
+      Offs->getOpcode() != Instruction::Mul)
+    return false;
+  if (!Offs->hasOneUse()) {
+    if (!AllGatScatUsers(Offs))
+      return false;
+  }
+
+  // Get a collection of all phi nodes in this block, and keep track of their
+  // increments (only look at the 'easy' case with exactly two incoming blocks)
+  std::vector<Instruction *> PhiNodes;
+  llvm::Loop *Loop = LI->getLoopFor(BB);
+  while (Loop != nullptr) {
+    for (Instruction &I : Loop->getHeader()->phis())
+      if (cast<PHINode>(I).getNumIncomingValues() == 2)
+        PhiNodes.push_back(&I);
+    Loop = Loop->getParentLoop();
+  }
+  // If we didn't find any phi nodes in the block, we either are not in a loop
+  // or buried deeper inside a loop
+  if (PhiNodes.empty())
+    return false;
+  IRBuilder<> Builder(BB->getContext());
+  Builder.SetInsertPoint(cast<Instruction>(PhiNodes[0]));
+  Builder.SetCurrentDebugLocation(cast<Instruction>(PhiNodes[0])->getDebugLoc());
+
+  // Find out which value the increment is and which one is corresponding to
+  // the start value, or just make a recursive call
+  int PhiOp = -1;
+  if (llvm::find(PhiNodes, Offs->getOperand(0)) != PhiNodes.end()) {
+    PhiOp = 0;
+  } else if (llvm::find(PhiNodes, Offs->getOperand(1)) != PhiNodes.end()) {
+    PhiOp = 1;
+  } else if (LI->getLoopFor(BB)) {
+    bool Changed = false;
+    if (isa<Instruction>(Offs->getOperand(0)) &&
+        LI->getLoopFor(BB)->contains(cast<Instruction>(Offs->getOperand(0))))
+      Changed |= optimiseOffsets(Offs->getOperand(0), BB, LI);
+    if (isa<Instruction>(Offs->getOperand(1)) &&
+        LI->getLoopFor(BB)->contains(cast<Instruction>(Offs->getOperand(1))))
+      Changed |= optimiseOffsets(Offs->getOperand(1), BB, LI);
+    if (!Changed) {
+      return false;
+    } else {
+      if (llvm::find(PhiNodes, Offs->getOperand(0)) != PhiNodes.end()) {
+        PhiOp = 0;
+      } else if (llvm::find(PhiNodes, Offs->getOperand(1)) != PhiNodes.end()) {
+        PhiOp = 1;
+      } else {
+        return false;
+      }
+    }
+  } else {
+    return false;
+  }
+
+  PHINode *Phi = cast<PHINode>(Offs->getOperand(PhiOp));
+  // The phi must be an induction variable
+  Instruction *Op;
+  int IncrementingBlock = -1;
+
+  if ((Op = dyn_cast<Instruction>(Phi->getIncomingValue(0))) != nullptr)
+    if (Op->getOpcode() == Instruction::Add &&
+        ((Op->getOperand(0) == Phi) || (Op->getOperand(1) == Phi))) {
+      IncrementingBlock = 0;
+    }
+  if ((Op = dyn_cast<Instruction>(Phi->getIncomingValue(1))) != nullptr)
+    if (Op->getOpcode() == Instruction::Add &&
+        ((Op->getOperand(0) == Phi) || (Op->getOperand(1) == Phi))) {
+      IncrementingBlock = 1;
+    }
+  if (IncrementingBlock == -1)
+    return false;
+
+  Instruction *IncInstruction =
+      cast<Instruction>(Phi->getIncomingValue(IncrementingBlock));
+
+  // If the phi is not used by anything else, we can just adapt it when
+  // replacing the instruction; if it is, we'll have to duplicate it
+  PHINode *NewPhi;
+  Value *IncrementPerRound = IncInstruction->getOperand(
+      (IncInstruction->getOperand(0) == Phi) ? 1 : 0);
+
+  // Get the value that is added to/multiplied with the phi
+  Value *SecondOperand = Offs->getOperand(PhiOp == 0 ? 1 : 0);
+
+  if (IncrementPerRound->getType() != SecondOperand->getType())
+    // Something has gone wrong, abort
+    return false;
+
+  if (isa<Instruction>(IncrementPerRound))
+    // TODO: Add functionality to deal with that
+    return false;
+
+  if (Phi->getNumUses() == 2) {
+    // No other users -> reuse existing phi (One user is the instruction
+    // we're looking at, the other is the phi increment)
+    NewPhi = Phi;
+  } else {
+    // There are other users -> create a new phi
+    NewPhi = PHINode::Create(Phi->getType(), 0, "NewPhi", Phi);
+    std::vector<Value *> Increases;
+    // Copy the incoming values of the old phi
+    NewPhi->addIncoming(Phi->getIncomingValue(IncrementingBlock == 1 ? 0 : 1),
+                        Phi->getIncomingBlock(IncrementingBlock == 1 ? 0 : 1));
+    IncInstruction = BinaryOperator::Create(
+        Instruction::BinaryOps(IncInstruction->getOpcode()), NewPhi,
+        IncrementPerRound, "LoopIncrement", IncInstruction);
+    NewPhi->addIncoming(IncInstruction,
+                        Phi->getIncomingBlock(IncrementingBlock));
+    IncrementingBlock = 1;
+    PhiNodes.push_back(NewPhi);
+  }
+
+  switch (Offs->getOpcode()) {
+  case Instruction::Add:
+    pushOutAdd(NewPhi, SecondOperand, IncrementingBlock == 1 ? 0 : 1);
+    break;
+  case Instruction::Mul:
+    pushOutMul(NewPhi, IncrementPerRound, SecondOperand, IncrementingBlock,
+               Builder);
+    break;
+  default:
+    return false;
+  }
+  LLVM_DEBUG(
+      dbgs() << "masked gathers/scatters: simplified loop variable add/mul\n");
+
+  // The instruction has now been "absorbed" into the phi value
+  Offs->replaceAllUsesWith(NewPhi);
+  if (Offs->hasNUses(0))
+    Offs->eraseFromParent();
+  // Clean up the old increment in case it's unused because we built a new
+  // one
+  if (IncInstruction->hasNUses(0))
+    IncInstruction->eraseFromParent();
+
+  return true;
+}
+
 bool MVEGatherScatterLowering::runOnFunction(Function &F) {
   if (!EnableMaskedGatherScatters)
     return false;
@@ -455,6 +730,8 @@
     return false;
   SmallVector<IntrinsicInst *, 4> Gathers;
   SmallVector<IntrinsicInst *, 4> Scatters;
+  LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+
   for (BasicBlock &BB : F) {
     for (Instruction &I : BB) {
       IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
@@ -466,10 +743,30 @@
   }
 
   bool Changed = false;
-  for (IntrinsicInst *I : Gathers)
-    Changed |= lowerGather(I);
-  for (IntrinsicInst *I : Scatters)
-    Changed |= lowerScatter(I);
+  for (unsigned i = 0; i < Gathers.size(); i++) {
+    IntrinsicInst *I = Gathers[i];
+    if (isa<GetElementPtrInst>(I->getArgOperand(0)))
+      optimiseOffsets(cast<GetElementPtrInst>(I->getArgOperand(0))->getOperand(1),
+                      I->getParent(), &LI);
+    Value *L = lowerGather(I);
+    if (L == nullptr)
+      continue;
+    // Get rid of any now dead instructions
+    SimplifyInstructionsInBlock(cast<Instruction>(L)->getParent());
+    Changed = true;
+  }
+
+  for (unsigned i = 0; i < Scatters.size(); i++) {
+    IntrinsicInst *I = Scatters[i];
+    if (isa<GetElementPtrInst>(I->getArgOperand(1)))
+      optimiseOffsets(cast<GetElementPtrInst>(I->getArgOperand(1))->getOperand(1),
+                      I->getParent(), &LI);
+    Value *S = lowerScatter(I);
+    if (S == nullptr)
+      continue;
+    // Get rid of any now dead instructions
+    SimplifyInstructionsInBlock(cast<Instruction>(S)->getParent());
+    Changed = true;
+  }
 
   return Changed;
 }
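The test updates that follow show the effect of optimiseOffsets/pushOutAdd. As a minimal sketch (hypothetical IR in the style of the tests below, assuming a start vector of <i32 0, i32 2, i32 4, i32 6> and a loop-invariant offset splat of 6, as the tests use), an offset add that used to be recomputed on every iteration:

  vector.body:
    %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body ]
    %offs = add <4 x i32> %vec.ind, <i32 6, i32 6, i32 6, i32 6>
    %gather = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %data, <4 x i32> %offs, i32 32, i32 2, i32 1)

is absorbed into the start value of the induction phi, so the add executes once in the preheader and the gather consumes the phi directly:

  vector.ph:
    %PushedOutAdd = add <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 6, i32 6, i32 6, i32 6>
    br label %vector.body
  vector.body:
    %vec.ind = phi <4 x i32> [ %PushedOutAdd, %vector.ph ], [ %vec.ind.next, %vector.body ]
    %gather = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %data, <4 x i32> %vec.ind, i32 32, i32 2, i32 1)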
Index: llvm/test/CodeGen/ARM/O3-pipeline.ll
===================================================================
--- llvm/test/CodeGen/ARM/O3-pipeline.ll
+++ llvm/test/CodeGen/ARM/O3-pipeline.ll
@@ -7,11 +7,11 @@
 ; CHECK-NEXT: FunctionPass Manager
 ; CHECK-NEXT: Expand Atomic instructions
 ; CHECK-NEXT: Simplify the CFG
-; CHECK-NEXT: MVE gather/scatter lowering
 ; CHECK-NEXT: Dominator Tree Construction
+; CHECK-NEXT: Natural Loop Information
+; CHECK-NEXT: MVE gather/scatter lowering
 ; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
 ; CHECK-NEXT: Module Verifier
-; CHECK-NEXT: Natural Loop Information
 ; CHECK-NEXT: Canonicalize natural loops
 ; CHECK-NEXT: Scalar Evolution Analysis
 ; CHECK-NEXT: Loop Pass Manager
Index: llvm/test/CodeGen/Thumb2/mve-gather-optimisation-deep.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-gather-optimisation-deep.ll
+++ llvm/test/CodeGen/Thumb2/mve-gather-optimisation-deep.ll
@@ -7,25 +7,24 @@
 ; CHECK-LABEL: @push_out_add_sub_block(
 ; CHECK-NEXT: vector.ph:
 ; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[N_VEC:%.*]], 1
+; CHECK-NEXT: [[PUSHEDOUTADD:%.*]] = add <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 6, i32 6, i32 6, i32 6>
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY_END]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[PUSHEDOUTADD]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY_END]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX]], 50
 ; CHECK-NEXT: br i1 [[TMP0]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
 ; CHECK: lower.block:
-; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[VEC_IND]], <i32 6, i32 6, i32 6, i32 6>
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA]], <4 x i32> [[TMP1]], i32 32, i32 2, i32 1)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP4]] to <4 x i32>*
-; CHECK-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* [[TMP5]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA:%.*]], <4 x i32> [[VEC_IND]], i32 32, i32 2, i32 1)
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP1]], <4 x i32>* [[TMP3]], align 4
 ; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
 ; CHECK-NEXT: br label [[VECTOR_BODY_END]]
 ; CHECK: vector.body.end:
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[END]], label [[VECTOR_BODY]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label [[END]], label [[VECTOR_BODY]]
 ; CHECK: end:
 ; CHECK-NEXT: ret void
 ;
@@ -63,26 +62,25 @@
 ; CHECK-LABEL: @push_out_mul_sub_block(
 ; CHECK-NEXT: vector.ph:
 ; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[N_VEC:%.*]], 1
+; CHECK-NEXT: [[PUSHEDOUTMUL:%.*]] = mul <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 3, i32 3, i32 3, i32 3>
+; CHECK-NEXT: [[PUSHEDOUTADD:%.*]] = add <4 x i32> [[PUSHEDOUTMUL]], <i32 6, i32 6, i32 6, i32 6>
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY_END]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[PUSHEDOUTADD]], [[VECTOR_PH]] ], [ [[INCREMENTPUSHEDOUTMUL:%.*]], [[VECTOR_BODY_END]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX]], 50
 ; CHECK-NEXT: br i1 [[TMP0]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
 ; CHECK: lower.block:
-; CHECK-NEXT: [[TMP1:%.*]] = mul <4 x i32> [[VEC_IND]], <i32 3, i32 3, i32 3, i32 3>
-; CHECK-NEXT: [[TMP2:%.*]] = add <4 x i32> [[TMP1]], <i32 6, i32 6, i32 6, i32 6>
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA]], <4 x i32> [[TMP2]], i32 32, i32 2, i32 1)
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
-; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
-; CHECK-NEXT: store <4 x i32> [[TMP4]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA:%.*]], <4 x i32> [[VEC_IND]], i32 32, i32 2, i32 1)
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP1]], <4 x i32>* [[TMP3]], align 4
 ; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
 ; CHECK-NEXT: br label [[VECTOR_BODY_END]]
 ; CHECK: vector.body.end:
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[END]], label [[VECTOR_BODY]]
+; CHECK-NEXT: [[INCREMENTPUSHEDOUTMUL]] = add <4 x i32> [[VEC_IND]], <i32 24, i32 24, i32 24, i32 24>
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label [[END]], label [[VECTOR_BODY]]
 ; CHECK: end:
 ; CHECK-NEXT: ret void
 ;
@@ -122,32 +120,30 @@
 ; CHECK-LABEL: @push_out_mul_sub_loop(
 ; CHECK-NEXT: vector.ph:
 ; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[N_VEC:%.*]], 2
+; CHECK-NEXT: [[PUSHEDOUTMUL:%.*]] = mul <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 3, i32 3, i32 3, i32 3>
+; CHECK-NEXT: [[PUSHEDOUTADD:%.*]] = add <4 x i32> [[PUSHEDOUTMUL]], <i32 6, i32 6, i32 6, i32 6>
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY_END]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[PUSHEDOUTADD]], [[VECTOR_PH]] ], [ [[INCREMENTPUSHEDOUTMUL:%.*]], [[VECTOR_BODY_END]] ]
 ; CHECK-NEXT: br label [[VECTOR_2_PH:%.*]]
 ; CHECK: vector.2.ph:
 ; CHECK-NEXT: br label [[VECTOR_2_BODY:%.*]]
 ; CHECK: vector.2.body:
-; CHECK-NEXT: [[INDEX_2:%.*]] = phi i32 [ 0, [[VECTOR_2_PH]] ], [ [[INDEX_2_NEXT:%.*]], [[VECTOR_2_BODY_END:%.*]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul <4 x i32> [[VEC_IND]], <i32 3, i32 3, i32 3, i32 3>
-; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[TMP0]], <i32 6, i32 6, i32 6, i32 6>
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA]], <4 x i32> [[TMP1]], i32 32, i32 2, i32 1)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP4]] to <4 x i32>*
-; CHECK-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* [[TMP5]], align 4
-; CHECK-NEXT: br label [[VECTOR_2_BODY_END]]
+; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA:%.*]], <4 x i32> [[VEC_IND]], i32 32, i32 2, i32 1)
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP0]], <4 x i32>* [[TMP2]], align 4
+; CHECK-NEXT: br label [[VECTOR_2_BODY_END:%.*]]
 ; CHECK: vector.2.body.end:
-; CHECK-NEXT: [[INDEX_2_NEXT]] = add i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_2_NEXT]], 15
-; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_BODY_END]], label [[VECTOR_2_BODY]]
+; CHECK-NEXT: [[INDEX_2_NEXT:%.*]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_2_NEXT]], 15
+; CHECK-NEXT: br i1 [[TMP3]], label [[VECTOR_BODY_END]], label [[VECTOR_2_BODY]]
 ; CHECK: vector.body.end:
 ; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[END:%.*]], label [[VECTOR_BODY]]
+; CHECK-NEXT: [[INCREMENTPUSHEDOUTMUL]] = add <4 x i32> [[VEC_IND]], <i32 24, i32 24, i32 24, i32 24>
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label [[END:%.*]], label [[VECTOR_BODY]]
 ; CHECK: end:
 ; CHECK-NEXT: ret void
 ;
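For the mul case, pushOutMul pre-multiplies the phi's start value and scales the per-round increment, so the loop body keeps a single vector add. A sketch with the constants used by push_out_mul in the next file (start <0,2,4,6>, multiplier splat 3, increment splat 8; both splats are compile-time constants, so the folded per-round step is 8 * 3 = 24, the #0x18 visible in the assembly below):

  vector.ph:
    %PushedOutMul = mul <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 3, i32 3, i32 3, i32 3>
    br label %vector.body
  vector.body:
    %vec.ind = phi <4 x i32> [ %PushedOutMul, %vector.ph ], [ %IncrementPushedOutMul, %vector.body ]
    ; ... gather using %vec.ind ...
    %IncrementPushedOutMul = add <4 x i32> %vec.ind, <i32 24, i32 24, i32 24, i32 24>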
Index: llvm/test/CodeGen/Thumb2/mve-gather-optimisation.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-gather-optimisation.ll
+++ llvm/test/CodeGen/Thumb2/mve-gather-optimisation.ll
@@ -19,30 +19,25 @@
 define arm_aapcs_vfpcc void @push_out_mul(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
 ; CHECK-LABEL: push_out_mul:
 ; CHECK: @ %bb.0: @ %vector.ph
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
 ; CHECK-NEXT: adr r3, .LCPI0_0
-; CHECK-NEXT: vmov.i32 q1, #0x8
+; CHECK-NEXT: vmov.i32 q1, #0x18
 ; CHECK-NEXT: vldrw.u32 q0, [r3]
-; CHECK-NEXT: vmov.i32 q2, #0x3
 ; CHECK-NEXT: .LBB0_1: @ %vector.body
 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmul.i32 q3, q0, q2
+; CHECK-NEXT: vldrw.u32 q2, [r0, q0, uxtw #2]
 ; CHECK-NEXT: subs r2, #4
-; CHECK-NEXT: vldrw.u32 q4, [r0, q3, uxtw #2]
 ; CHECK-NEXT: vadd.i32 q0, q0, q1
-; CHECK-NEXT: vstrb.8 q4, [r1], #16
+; CHECK-NEXT: vstrb.8 q2, [r1], #16
 ; CHECK-NEXT: bne .LBB0_1
 ; CHECK-NEXT: @ %bb.2: @ %end
-; CHECK-NEXT: vpop {d8, d9}
 ; CHECK-NEXT: bx lr
 ; CHECK-NEXT: .p2align 4
 ; CHECK-NEXT: @ %bb.3:
 ; CHECK-NEXT: .LCPI0_0:
 ; CHECK-NEXT: .long 0 @ 0x0
-; CHECK-NEXT: .long 2 @ 0x2
-; CHECK-NEXT: .long 4 @ 0x4
 ; CHECK-NEXT: .long 6 @ 0x6
+; CHECK-NEXT: .long 12 @ 0xc
+; CHECK-NEXT: .long 18 @ 0x12
 
 vector.ph: ; preds = %for.body.preheader
   %ind.end = shl i32 %n.vec, 1
@@ -69,30 +64,25 @@
 define arm_aapcs_vfpcc void @push_out_add(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
 ; CHECK-LABEL: push_out_add:
 ; CHECK: @ %bb.0: @ %vector.ph
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
 ; CHECK-NEXT: adr r3, .LCPI1_0
 ; CHECK-NEXT: vmov.i32 q1, #0x8
 ; CHECK-NEXT: vldrw.u32 q0, [r3]
-; CHECK-NEXT: vmov.i32 q2, #0x6
 ; CHECK-NEXT: .LBB1_1: @ %vector.body
 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vadd.i32 q3, q0, q2
+; CHECK-NEXT: vldrw.u32 q2, [r0, q0, uxtw #2]
 ; CHECK-NEXT: subs r2, #4
-; CHECK-NEXT: vldrw.u32 q4, [r0, q3, uxtw #2]
 ; CHECK-NEXT: vadd.i32 q0, q0, q1
-; CHECK-NEXT: vstrb.8 q4, [r1], #16
+; CHECK-NEXT: vstrb.8 q2, [r1], #16
 ; CHECK-NEXT: bne .LBB1_1
 ; CHECK-NEXT: @ %bb.2: @ %end
-; CHECK-NEXT: vpop {d8, d9}
 ; CHECK-NEXT: bx lr
 ; CHECK-NEXT: .p2align 4
 ; CHECK-NEXT: @ %bb.3:
 ; CHECK-NEXT: .LCPI1_0:
-; CHECK-NEXT: .long 0 @ 0x0
-; CHECK-NEXT: .long 2 @ 0x2
-; CHECK-NEXT: .long 4 @ 0x4
 ; CHECK-NEXT: .long 6 @ 0x6
+; CHECK-NEXT: .long 8 @ 0x8
+; CHECK-NEXT: .long 10 @ 0xa
+; CHECK-NEXT: .long 12 @ 0xc
 
 vector.ph: ; preds = %for.body.preheader
   %ind.end = shl i32 %n.vec, 1
@@ -119,30 +109,25 @@
 define arm_aapcs_vfpcc void @push_out_add_sub_block(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
 ; CHECK-LABEL: push_out_add_sub_block:
 ; CHECK: @ %bb.0: @ %vector.ph
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
 ; CHECK-NEXT: adr r3, .LCPI2_0
 ; CHECK-NEXT: vmov.i32 q1, #0x8
 ; CHECK-NEXT: vldrw.u32 q0, [r3]
-; CHECK-NEXT: vmov.i32 q2, #0x6
 ; CHECK-NEXT: .LBB2_1: @ %vector.body
 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vadd.i32 q3, q0, q2
+; CHECK-NEXT: vldrw.u32 q2, [r0, q0, uxtw #2]
 ; CHECK-NEXT: subs r2, #4
-; CHECK-NEXT: vldrw.u32 q4, [r0, q3, uxtw #2]
 ; CHECK-NEXT: vadd.i32 q0, q0, q1
-; CHECK-NEXT: vstrb.8 q4, [r1], #16
+; CHECK-NEXT: vstrb.8 q2, [r1], #16
 ; CHECK-NEXT: bne .LBB2_1
 ; CHECK-NEXT: @ %bb.2: @ %end
-; CHECK-NEXT: vpop {d8, d9}
 ; CHECK-NEXT: bx lr
 ; CHECK-NEXT: .p2align 4
 ; CHECK-NEXT: @ %bb.3:
 ; CHECK-NEXT: .LCPI2_0:
-; CHECK-NEXT: .long 0 @ 0x0
-; CHECK-NEXT: .long 2 @ 0x2
-; CHECK-NEXT: .long 4 @ 0x4
 ; CHECK-NEXT: .long 6 @ 0x6
+; CHECK-NEXT: .long 8 @ 0x8
+; CHECK-NEXT: .long 10 @ 0xa
+; CHECK-NEXT: .long 12 @ 0xc
 
 vector.ph: ; preds = %for.body.preheader
   %ind.end = shl i32 %n.vec, 1
@@ -285,54 +270,65 @@
 ; CHECK: @ %bb.0: @ %for.cond8.preheader.us.us.preheader.preheader
 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr}
-; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: ldrd r6, r12, [sp, #80]
-; CHECK-NEXT: sub.w r7, r12, #1
-; CHECK-NEXT: movs r5, #1
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #48
+; CHECK-NEXT: sub sp, #48
+; CHECK-NEXT: adr r6, .LCPI5_0
+; CHECK-NEXT: ldrd r9, r12, [sp, #144]
+; CHECK-NEXT: vldrw.u32 q0, [r6]
+; CHECK-NEXT: sub.w r6, r12, #1
+; CHECK-NEXT: movs r7, #1
+; CHECK-NEXT: vdup.32 q2, r9
+; CHECK-NEXT: add.w r6, r7, r6, lsr #1
+; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: bic r6, r6, #3
+; CHECK-NEXT: vmul.i32 q0, q0, r9
+; CHECK-NEXT: subs r6, #4
+; CHECK-NEXT: vshl.i32 q2, q2, #3
 ; CHECK-NEXT: mov.w r8, #0
-; CHECK-NEXT: add.w r7, r5, r7, lsr #1
-; CHECK-NEXT: vmov.i32 q1, #0x8
-; CHECK-NEXT: bic r7, r7, #3
-; CHECK-NEXT: subs r7, #4
-; CHECK-NEXT: add.w r10, r5, r7, lsr #2
-; CHECK-NEXT: adr r7, .LCPI5_0
-; CHECK-NEXT: vldrw.u32 q0, [r7]
+; CHECK-NEXT: vmov.i32 q3, #0x8
+; CHECK-NEXT: add.w r4, r7, r6, lsr #2
+; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
 ; CHECK-NEXT: .LBB5_1: @ %for.cond8.preheader.us.us.preheader
 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
 ; CHECK-NEXT: @ Child Loop BB5_2 Depth 2
 ; CHECK-NEXT: @ Child Loop BB5_3 Depth 3
-; CHECK-NEXT: mul r9, r8, r6
-; CHECK-NEXT: movs r5, #0
+; CHECK-NEXT: mul r10, r8, r9
+; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
 ; CHECK-NEXT: mul r7, r8, r12
+; CHECK-NEXT: vadd.i32 q0, q0, r7
+; CHECK-NEXT: movs r7, #0
+; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
 ; CHECK-NEXT: .LBB5_2: @ %vector.ph
 ; CHECK-NEXT: @ Parent Loop BB5_1 Depth=1
 ; CHECK-NEXT: @ => This Loop Header: Depth=2
 ; CHECK-NEXT: @ Child Loop BB5_3 Depth 3
-; CHECK-NEXT: vmov.i32 q2, #0x0
-; CHECK-NEXT: vmov q3, q0
-; CHECK-NEXT: dls lr, r10
+; CHECK-NEXT: vmov.i32 q5, #0x0
+; CHECK-NEXT: vldrw.u32 q6, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q7, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: dls lr, r4
 ; CHECK-NEXT: .LBB5_3: @ %vector.body
 ; CHECK-NEXT: @ Parent Loop BB5_1 Depth=1
 ; CHECK-NEXT: @ Parent Loop BB5_2 Depth=2
 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
-; CHECK-NEXT: vadd.i32 q5, q3, r7
-; CHECK-NEXT: vadd.i32 q4, q3, q1
-; CHECK-NEXT: vldrw.u32 q6, [r0, q5, uxtw #2]
-; CHECK-NEXT: vdup.32 q5, r5
-; CHECK-NEXT: vmla.u32 q5, q3, r6
-; CHECK-NEXT: vldrw.u32 q3, [r1, q5, uxtw #2]
-; CHECK-NEXT: vmul.i32 q3, q3, q6
-; CHECK-NEXT: vadd.i32 q2, q3, q2
-; CHECK-NEXT: vmov q3, q4
+; CHECK-NEXT: vadd.i32 q0, q7, q2
+; CHECK-NEXT: vadd.i32 q7, q7, r7
+; CHECK-NEXT: vldrw.u32 q4, [r1, q7, uxtw #2]
+; CHECK-NEXT: vldrw.u32 q7, [r0, q6, uxtw #2]
+; CHECK-NEXT: vadd.i32 q1, q6, q3
+; CHECK-NEXT: vmul.i32 q4, q4, q7
+; CHECK-NEXT: vmov q6, q1
+; CHECK-NEXT: vadd.i32 q5, q4, q5
+; CHECK-NEXT: vmov q7, q0
 ; CHECK-NEXT: le lr, .LBB5_3
 ; CHECK-NEXT: @ %bb.4: @ %middle.block
 ; CHECK-NEXT: @ in Loop: Header=BB5_2 Depth=2
-; CHECK-NEXT: add.w lr, r5, r9
-; CHECK-NEXT: adds r5, #1
-; CHECK-NEXT: vaddv.u32 r4, q2
-; CHECK-NEXT: cmp r5, r6
-; CHECK-NEXT: str.w r4, [r2, lr, lsl #2]
+; CHECK-NEXT: add.w r5, r7, r10
+; CHECK-NEXT: adds r7, #1
+; CHECK-NEXT: vaddv.u32 r6, q5
+; CHECK-NEXT: cmp r7, r9
+; CHECK-NEXT: str.w r6, [r2, r5, lsl #2]
 ; CHECK-NEXT: bne .LBB5_2
 ; CHECK-NEXT: @ %bb.5: @ %for.cond4.for.cond.cleanup6_crit_edge.us
 ; CHECK-NEXT: @ in Loop: Header=BB5_1 Depth=1
@@ -340,7 +336,8 @@
 ; CHECK-NEXT: cmp r8, r3
 ; CHECK-NEXT: bne .LBB5_1
 ; CHECK-NEXT: @ %bb.6: @ %for.end25
-; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: add sp, #48
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
 ; CHECK-NEXT: .p2align 4
 ; CHECK-NEXT: @ %bb.7:
@@ -413,8 +410,8 @@
   ret void
 }
 
-define dso_local void @arm_mat_mult_q31_short(i16* noalias nocapture readonly %A, i16* noalias nocapture readonly %B, i16* noalias nocapture %C, i32 %n, i32 %m, i32 %l) local_unnamed_addr #0 {
-; CHECK-LABEL: arm_mat_mult_q31_short:
+define dso_local void @arm_mat_mult_q15(i16* noalias nocapture readonly %A, i16* noalias nocapture readonly %B, i16* noalias nocapture %C, i32 %n, i32 %m, i32 %l) local_unnamed_addr #0 {
+; CHECK-LABEL: arm_mat_mult_q15:
 ; CHECK: @ %bb.0: @ %entry
 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -426,11 +423,11 @@
 ; CHECK-NEXT: sub sp, #32
 ; CHECK-NEXT: strd r0, r2, [sp, #24] @ 8-byte Folded Spill
 ; CHECK-NEXT: cmp r3, #0
-; CHECK-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
 ; CHECK-NEXT: mov r0, r3
 ; CHECK-NEXT: itt ne
-; CHECK-NEXT: ldrne.w r11, [sp, #104]
-; CHECK-NEXT: cmpne.w r11, #0
+; CHECK-NEXT: ldrne.w lr, [sp, #104]
+; CHECK-NEXT: cmpne.w lr, #0
 ; CHECK-NEXT: bne .LBB6_2
 ; CHECK-NEXT: .LBB6_1: @ %for.cond.cleanup
 ; CHECK-NEXT: add sp, #32
@@ -438,24 +435,25 @@
 ; CHECK-NEXT: add sp, #4
 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 ; CHECK-NEXT: .LBB6_2: @ %for.cond1.preheader.us.preheader
-; CHECK-NEXT: ldr.w r10, [sp, #108]
-; CHECK-NEXT: mov r8, r1
+; CHECK-NEXT: ldr.w r11, [sp, #108]
+; CHECK-NEXT: mov r10, r1
 ; CHECK-NEXT: movs r1, #1
-; CHECK-NEXT: lsl.w r4, r11, #1
-; CHECK-NEXT: bic r0, r10, #3
+; CHECK-NEXT: lsl.w r4, lr, #1
+; CHECK-NEXT: bic r0, r11, #3
 ; CHECK-NEXT: str r0, [sp, #16] @ 4-byte Spill
 ; CHECK-NEXT: subs r0, #4
 ; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: vmov.i32 q5, #0x4
-; CHECK-NEXT: add.w r0, r1, r0, lsr #2
-; CHECK-NEXT: str r0, [sp, #12] @ 4-byte Spill
-; CHECK-NEXT: lsl.w r0, r10, #1
-; CHECK-NEXT: movs r1, #0
-; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT: add.w r8, r1, r0, lsr #2
+; CHECK-NEXT: lsl.w r0, r11, #1
+; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
 ; CHECK-NEXT: adr r0, .LCPI6_0
-; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vldrw.u32 q0, [r0]
 ; CHECK-NEXT: ldr r0, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT: movs r1, #0
 ; CHECK-NEXT: str r0, [sp, #20] @ 4-byte Spill
+; CHECK-NEXT: vmul.i32 q4, q0, lr
+; CHECK-NEXT: vdup.32 q0, lr
+; CHECK-NEXT: vshl.i32 q5, q0, #2
 ; CHECK-NEXT: b .LBB6_5
 ; CHECK-NEXT: .LBB6_3: @ %for.cond5.preheader.us73.preheader
 ; CHECK-NEXT: @ in Loop: Header=BB6_5 Depth=1
 ; CHECK-NEXT: mov r1, r4
 ; CHECK-NEXT: add.w r0, r0, r12, lsl #1
 ; CHECK-NEXT: bl __aeabi_memclr
+; CHECK-NEXT: ldr.w lr, [sp, #104]
 ; CHECK-NEXT: .LBB6_4: @ %for.cond1.for.cond.cleanup3_crit_edge.us
 ; CHECK-NEXT: @ in Loop: Header=BB6_5 Depth=1
-; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload
-; CHECK-NEXT: add r9, r10
+; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: add r9, r11
 ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
 ; CHECK-NEXT: add r1, r0
 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
-; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
 ; CHECK-NEXT: adds r1, #1
 ; CHECK-NEXT: cmp r1, r0
 ; CHECK-NEXT: beq .LBB6_1
@@ -480,9 +479,9 @@
 ; CHECK-NEXT: @ Child Loop BB6_8 Depth 2
 ; CHECK-NEXT: @ Child Loop BB6_11 Depth 3
 ; CHECK-NEXT: @ Child Loop BB6_14 Depth 3
-; CHECK-NEXT: mul r12, r1, r11
-; CHECK-NEXT: cmp.w r10, #0
-; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: mul r12, r1, lr
+; CHECK-NEXT: cmp.w r11, #0
+; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
 ; CHECK-NEXT: beq .LBB6_3
 ; CHECK-NEXT: @ %bb.6: @ %for.cond5.preheader.us.us.preheader
 ; CHECK-NEXT: @ in Loop: Header=BB6_5 Depth=1
@@ -490,66 +489,65 @@
 ; CHECK-NEXT: b .LBB6_8
 ; CHECK-NEXT: .LBB6_7: @ %for.cond5.for.cond.cleanup7_crit_edge.us.us
 ; CHECK-NEXT: @ in Loop: Header=BB6_8 Depth=2
-; CHECK-NEXT: ldr r3, [sp, #28] @ 4-byte Reload
-; CHECK-NEXT: add.w r0, r1, r12
+; CHECK-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
+; CHECK-NEXT: add.w r3, r1, r12
 ; CHECK-NEXT: adds r1, #1
-; CHECK-NEXT: cmp r1, r11
-; CHECK-NEXT: strh.w r2, [r3, r0, lsl #1]
+; CHECK-NEXT: cmp r1, lr
+; CHECK-NEXT: strh.w r2, [r0, r3, lsl #1]
 ; CHECK-NEXT: beq .LBB6_4
 ; CHECK-NEXT: .LBB6_8: @ %for.cond5.preheader.us.us
 ; CHECK-NEXT: @ Parent Loop BB6_5 Depth=1
 ; CHECK-NEXT: @ => This Loop Header: Depth=2
 ; CHECK-NEXT: @ Child Loop BB6_11 Depth 3
 ; CHECK-NEXT: @ Child Loop BB6_14 Depth 3
-; CHECK-NEXT: cmp.w r10, #3
+; CHECK-NEXT: cmp.w r11, #3
 ; CHECK-NEXT: bhi .LBB6_10
 ; CHECK-NEXT: @ %bb.9: @ in Loop: Header=BB6_8 Depth=2
-; CHECK-NEXT: movs r0, #0
+; CHECK-NEXT: movs r7, #0
 ; CHECK-NEXT: movs r2, #0
 ; CHECK-NEXT: b .LBB6_13
 ; CHECK-NEXT: .LBB6_10: @ %vector.ph
 ; CHECK-NEXT: @ in Loop: Header=BB6_8 Depth=2
-; CHECK-NEXT: ldr.w lr, [sp, #12] @ 4-byte Reload
 ; CHECK-NEXT: vmov.i32 q0, #0x0
+; CHECK-NEXT: ldr r2, [sp, #20] @ 4-byte Reload
 ; CHECK-NEXT: vmov q1, q4
-; CHECK-NEXT: dls lr, lr
-; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
+; CHECK-NEXT: dls lr, r8
 ; CHECK-NEXT: .LBB6_11: @ %vector.body
 ; CHECK-NEXT: @ Parent Loop BB6_5 Depth=1
 ; CHECK-NEXT: @ Parent Loop BB6_8 Depth=2
 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
-; CHECK-NEXT: vdup.32 q3, r1
 ; CHECK-NEXT: vadd.i32 q2, q1, q5
-; CHECK-NEXT: vmla.u32 q3, q1, r11
-; CHECK-NEXT: vldrh.s32 q1, [r8, q3, uxtw #1]
-; CHECK-NEXT: vldrh.s32 q3, [r0], #8
-; CHECK-NEXT: vmul.i32 q1, q1, q3
+; CHECK-NEXT: vadd.i32 q1, q1, r1
+; CHECK-NEXT: vldrh.s32 q3, [r10, q1, uxtw #1]
+; CHECK-NEXT: vldrh.s32 q1, [r2], #8
+; CHECK-NEXT: vmul.i32 q1, q3, q1
 ; CHECK-NEXT: vadd.i32 q0, q1, q0
 ; CHECK-NEXT: vmov q1, q2
 ; CHECK-NEXT: le lr, .LBB6_11
 ; CHECK-NEXT: @ %bb.12: @ %middle.block
 ; CHECK-NEXT: @ in Loop: Header=BB6_8 Depth=2
-; CHECK-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; CHECK-NEXT: ldr r7, [sp, #16] @ 4-byte Reload
 ; CHECK-NEXT: vaddv.u32 r2, q0
-; CHECK-NEXT: cmp r0, r10
+; CHECK-NEXT: ldr.w lr, [sp, #104]
+; CHECK-NEXT: cmp r7, r11
 ; CHECK-NEXT: beq .LBB6_7
 ; CHECK-NEXT: .LBB6_13: @ %for.body8.us.us.preheader
 ; CHECK-NEXT: @ in Loop: Header=BB6_8 Depth=2
-; CHECK-NEXT: mla r3, r11, r0, r1
-; CHECK-NEXT: sub.w r5, r10, r0
-; CHECK-NEXT: add r0, r9
-; CHECK-NEXT: ldr r7, [sp, #24] @ 4-byte Reload
-; CHECK-NEXT: add.w r0, r7, r0, lsl #1
-; CHECK-NEXT: add.w r3, r8, r3, lsl #1
+; CHECK-NEXT: mla r3, lr, r7, r1
+; CHECK-NEXT: sub.w r5, r11, r7
+; CHECK-NEXT: add r7, r9
+; CHECK-NEXT: ldr r0, [sp, #24] @ 4-byte Reload
+; CHECK-NEXT: add.w r7, r0, r7, lsl #1
+; CHECK-NEXT: add.w r3, r10, r3, lsl #1
 ; CHECK-NEXT: .LBB6_14: @ %for.body8.us.us
 ; CHECK-NEXT: @ Parent Loop BB6_5 Depth=1
 ; CHECK-NEXT: @ Parent Loop BB6_8 Depth=2
 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
 ; CHECK-NEXT: ldrsh.w r6, [r3]
 ; CHECK-NEXT: add r3, r4
-; CHECK-NEXT: ldrsh r7, [r0], #2
+; CHECK-NEXT: ldrsh r0, [r7], #2
 ; CHECK-NEXT: subs r5, #1
-; CHECK-NEXT: smlabb r2, r6, r7, r2
+; CHECK-NEXT: smlabb r2, r6, r0, r2
 ; CHECK-NEXT: bne .LBB6_14
 ; CHECK-NEXT: b .LBB6_7
 ; CHECK-NEXT: .p2align 4