Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4463,7 +4463,11 @@
   case Instruction::URem:
     // TODO: We can use the loop-preheader as context point here and get
     // context sensitive reasoning
-    return !isSafeToSpeculativelyExecute(I);
+    // Note: Scalable vectors can't use the predicated scalarization path and
+    // thus must go through the widening strategy. Long term, we want fixed
+    // width to make a cost-based decision between widening and scalarization,
+    // but for now fixed width is left unconditionally using the scalar path.
+    return !VF.isScalable() && !isSafeToSpeculativelyExecute(I);
   }
   return false;
 }
@@ -7017,33 +7021,63 @@
   case Instruction::SDiv:
   case Instruction::URem:
   case Instruction::SRem:
-    // If we have a predicated instruction, it may not be executed for each
-    // vector lane. Get the scalarization cost and scale this amount by the
-    // probability of executing the predicated block. If the instruction is not
-    // predicated, we fall through to the next case.
-    if (VF.isVector() && isScalarWithPredication(I, VF)) {
+    if (VF.isVector() && blockNeedsPredicationForAnyReason(I->getParent()) &&
+        !isSafeToSpeculativelyExecute(I)) {
+      // If we're speculating lanes, we have two options - scalarization and
+      // guarded widening.
+      if (isScalarWithPredication(I, VF)) {
+        // Get the scalarization cost and scale this amount by the probability of
+        // executing the predicated block. If the instruction is not predicated,
+        // we fall through to the next case.
+        InstructionCost Cost = 0;
+
+        // These instructions have a non-void type, so account for the phi nodes
+        // that we will create. This cost is likely to be zero. The phi node
+        // cost, if any, should be scaled by the block probability because it
+        // models a copy at the end of each predicated block.
+        Cost += VF.getKnownMinValue() *
+                TTI.getCFInstrCost(Instruction::PHI, CostKind);
+
+        // The cost of the non-predicated instruction.
+        Cost += VF.getKnownMinValue() *
+                TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
+
+        // The cost of insertelement and extractelement instructions needed for
+        // scalarization.
+        Cost += getScalarizationOverhead(I, VF);
+
+        // Scale the cost by the probability of executing the predicated blocks.
+        // This assumes the predicated block for each vector lane is equally
+        // likely.
+        return Cost / getReciprocalPredBlockProb();
+      }
+
       InstructionCost Cost = 0;
 
-      // These instructions have a non-void type, so account for the phi nodes
-      // that we will create. This cost is likely to be zero. The phi node
-      // cost, if any, should be scaled by the block probability because it
-      // models a copy at the end of each predicated block.
-      Cost += VF.getKnownMinValue() *
-              TTI.getCFInstrCost(Instruction::PHI, CostKind);
-
-      // The cost of the non-predicated instruction.
-      Cost += VF.getKnownMinValue() *
-              TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
-
-      // The cost of insertelement and extractelement instructions needed for
-      // scalarization.
-      Cost += getScalarizationOverhead(I, VF);
-
-      // Scale the cost by the probability of executing the predicated blocks.
-      // This assumes the predicated block for each vector lane is equally
-      // likely.
-      return Cost / getReciprocalPredBlockProb();
+      // The cost of the select guard to ensure all lanes are well defined
+      // after we speculate above any internal control flow.
+      Cost += TTI.getCmpSelInstrCost(
+          Instruction::Select, ToVectorTy(I->getType(), VF),
+          ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
+          CmpInst::BAD_ICMP_PREDICATE, CostKind);
+
+      // Certain instructions can be cheaper to vectorize if they have a constant
+      // second vector operand. One example of this is shifts on x86.
+      Value *Op2 = I->getOperand(1);
+      TargetTransformInfo::OperandValueProperties Op2VP;
+      TargetTransformInfo::OperandValueKind Op2VK =
+          TTI.getOperandInfo(Op2, Op2VP);
+      if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
+        Op2VK = TargetTransformInfo::OK_UniformValue;
+
+      SmallVector<const Value *, 4> Operands(I->operand_values());
+      Cost += TTI.getArithmeticInstrCost(
+          I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
+          Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
+
+      return Cost;
     }
+    // We've proven all lanes safe to speculate, fall through.
     LLVM_FALLTHROUGH;
   case Instruction::Add:
   case Instruction::FAdd:
@@ -8294,55 +8328,63 @@
                                                              Range);
 }
 
-VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
-                                           ArrayRef<VPValue *> Operands) const {
-  auto IsVectorizableOpcode = [](unsigned Opcode) {
-    switch (Opcode) {
-    case Instruction::Add:
-    case Instruction::And:
-    case Instruction::AShr:
-    case Instruction::BitCast:
-    case Instruction::FAdd:
-    case Instruction::FCmp:
-    case Instruction::FDiv:
-    case Instruction::FMul:
-    case Instruction::FNeg:
-    case Instruction::FPExt:
-    case Instruction::FPToSI:
-    case Instruction::FPToUI:
-    case Instruction::FPTrunc:
-    case Instruction::FRem:
-    case Instruction::FSub:
-    case Instruction::ICmp:
-    case Instruction::IntToPtr:
-    case Instruction::LShr:
-    case Instruction::Mul:
-    case Instruction::Or:
-    case Instruction::PtrToInt:
-    case Instruction::SDiv:
-    case Instruction::Select:
-    case Instruction::SExt:
-    case Instruction::Shl:
-    case Instruction::SIToFP:
-    case Instruction::SRem:
-    case Instruction::Sub:
-    case Instruction::Trunc:
-    case Instruction::UDiv:
-    case Instruction::UIToFP:
-    case Instruction::URem:
-    case Instruction::Xor:
-    case Instruction::ZExt:
-    case Instruction::Freeze:
-      return true;
+VPRecipeBase *VPRecipeBuilder::tryToWiden(Instruction *I,
+                                          ArrayRef<VPValue *> Operands,
+                                          VPlanPtr &Plan) {
+  switch (I->getOpcode()) {
+  default:
+    return nullptr;
+  case Instruction::SDiv:
+  case Instruction::UDiv:
+  case Instruction::SRem:
+  case Instruction::URem: {
+    // If not provably safe, use a select to form a safe divisor before
+    // widening the div/rem operation itself; otherwise fall through to the
+    // general handling below. NOTE: There's a subtle assumption here that we
+    // have no exceptional exits within a block; otherwise we'd need to prove
+    // speculation safety without explicit block predication. If that
+    // assumption is ever invalidated, this code needs to be updated.
+    if (CM.blockNeedsPredicationForAnyReason(I->getParent()) &&
+        !isSafeToSpeculativelyExecute(I)) {
+      VPValue *Mask = createBlockInMask(I->getParent(), Plan);
+      return new VPWidenDivRemRecipe(*I, make_range(Operands.begin(), Operands.end()),
+                                     Mask);
     }
-    return false;
+    LLVM_FALLTHROUGH;
+  }
+  case Instruction::Add:
+  case Instruction::And:
+  case Instruction::AShr:
+  case Instruction::BitCast:
+  case Instruction::FAdd:
+  case Instruction::FCmp:
+  case Instruction::FDiv:
+  case Instruction::FMul:
+  case Instruction::FNeg:
+  case Instruction::FPExt:
+  case Instruction::FPToSI:
+  case Instruction::FPToUI:
+  case Instruction::FPTrunc:
+  case Instruction::FRem:
+  case Instruction::FSub:
+  case Instruction::ICmp:
+  case Instruction::IntToPtr:
+  case Instruction::LShr:
+  case Instruction::Mul:
+  case Instruction::Or:
+  case Instruction::PtrToInt:
+  case Instruction::Select:
+  case Instruction::SExt:
+  case Instruction::Shl:
+  case Instruction::SIToFP:
+  case Instruction::Sub:
+  case Instruction::Trunc:
+  case Instruction::UIToFP:
+  case Instruction::Xor:
+  case Instruction::ZExt:
+  case Instruction::Freeze:
+    return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
   };
-
-  if (!IsVectorizableOpcode(I->getOpcode()))
-    return nullptr;
-
-  // Success: widen this instruction.
-  return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
 }
 
 void VPRecipeBuilder::fixHeaderPhis() {
@@ -8542,7 +8584,7 @@
         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
   }
 
-  return toVPRecipeResult(tryToWiden(Instr, Operands));
+  return toVPRecipeResult(tryToWiden(Instr, Operands, Plan));
 }
 
 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
Index: llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -100,7 +100,8 @@
   /// Check if \p I has an opcode that can be widened and return a VPWidenRecipe
   /// if it can. The function should only be called if the cost-model indicates
   /// that widening should be performed.
-  VPWidenRecipe *tryToWiden(Instruction *I, ArrayRef<VPValue *> Operands) const;
+  VPRecipeBase *tryToWiden(Instruction *I, ArrayRef<VPValue *> Operands,
+                           VPlanPtr &Plan);
 
   /// Return a VPRecipeOrValueTy with VPRecipeBase * being set. This can be used to force the
   /// use as VPRecipeBase* for recipe sub-types that also inherit from VPValue.
   VPRecipeOrVPValueTy toVPRecipeResult(VPRecipeBase *R) const { return R; }
Index: llvm/lib/Transforms/Vectorize/VPlan.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlan.h
+++ llvm/lib/Transforms/Vectorize/VPlan.h
@@ -753,6 +753,7 @@
 inline bool VPUser::classof(const VPDef *Def) {
   return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
          Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
+         Def->getVPDefID() == VPRecipeBase::VPWidenDivRemSC ||
          Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
          Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
          Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
@@ -944,6 +945,38 @@
 #endif
 };
 
+/// VPWidenDivRemRecipe is a recipe for widening a div or rem instruction
+/// with an appropriate select to ensure the RHS cannot fault.
+class VPWidenDivRemRecipe : public VPRecipeBase, public VPValue {
+public:
+  template <typename IterT>
+  VPWidenDivRemRecipe(Instruction &I, iterator_range<IterT> Operands,
+                      VPValue *BlockInMask)
+      : VPRecipeBase(VPRecipeBase::VPWidenDivRemSC, Operands),
+        VPValue(VPValue::VPVWidenDivRemSC, &I, this) {
+    addOperand(BlockInMask);
+  }
+
+  ~VPWidenDivRemRecipe() override = default;
+
+  /// Method to support type inquiry through isa, cast, and dyn_cast.
+  static inline bool classof(const VPDef *D) {
+    return D->getVPDefID() == VPRecipeBase::VPWidenDivRemSC;
+  }
+  static inline bool classof(const VPValue *V) {
+    return V->getVPValueID() == VPValue::VPVWidenDivRemSC;
+  }
+
+  /// Generate the wide div/rem, guarding the divisor with a safe-value select.
+  void execute(VPTransformState &State) override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Print the recipe.
+  void print(raw_ostream &O, const Twine &Indent,
+             VPSlotTracker &SlotTracker) const override;
+#endif
+};
+
 /// A recipe for widening Call instructions.
 class VPWidenCallRecipe : public VPRecipeBase, public VPValue {
Index: llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -621,6 +621,64 @@
   O << " = " << getUnderlyingInstr()->getOpcodeName() << " ";
   printOperands(O, SlotTracker);
 }
+#endif
+
+void VPWidenDivRemRecipe::execute(VPTransformState &State) {
+  auto &I = *cast<Instruction>(getUnderlyingValue());
+  auto &Builder = State.Builder;
+  switch (I.getOpcode()) {
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::SRem:
+  case Instruction::URem: {
+    // Just widen binops.
+    State.setDebugLocFromInst(&I);
+
+    for (unsigned Part = 0; Part < State.UF; ++Part) {
+      SmallVector<Value *, 2> Ops;
+      Ops.push_back(State.get(getOperand(0), Part));
+      Ops.push_back(State.get(getOperand(1), Part));
+
+      Type *Ty = Ops[1]->getType();
+      Value *One = ConstantInt::get(Ty, 1u, false);
+      Value *Mask = State.get(getOperand(2), Part);
+      Ops[1] = Builder.CreateSelect(Mask, Ops[1], One);
+
+      Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
+
+      if (auto *VecOp = dyn_cast<Instruction>(V)) {
+        VecOp->copyIRFlags(&I);
+
+        // If the instruction is vectorized and was in a basic block that needed
+        // predication, we can't propagate poison-generating flags (nuw/nsw,
+        // exact, etc.). The control flow has been linearized and the
+        // instruction is no longer guarded by the predicate, which could cause
+        // the flag properties to no longer hold.
+        if (State.MayGeneratePoisonRecipes.contains(this))
+          VecOp->dropPoisonGeneratingFlags();
+      }
+
+      // Use this vector value for all users of the original instruction.
+      State.set(this, V, Part);
+      State.addMetadata(V, &I);
+    }
+
+    break;
+  }
+  default:
+    // This instruction is not vectorized by simple widening.
+    LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
+    llvm_unreachable("Unhandled instruction!");
+  } // end of switch.
+} +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +void VPWidenDivRemRecipe::print(raw_ostream &O, const Twine &Indent, + VPSlotTracker &SlotTracker) const { + O << Indent << "WIDEN-DIVREM "; + printAsOperand(O, SlotTracker); + O << " = " << getUnderlyingInstr()->getOpcodeName() << " "; + printOperands(O, SlotTracker); +} void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const { Index: llvm/lib/Transforms/Vectorize/VPlanValue.h =================================================================== --- llvm/lib/Transforms/Vectorize/VPlanValue.h +++ llvm/lib/Transforms/Vectorize/VPlanValue.h @@ -95,6 +95,7 @@ VPVReductionSC, VPVReplicateSC, VPVWidenSC, + VPVWidenDivRemSC, VPVWidenCallSC, VPVWidenCanonicalIVSC, VPVWidenGEPSC, @@ -354,6 +355,7 @@ VPWidenGEPSC, VPWidenMemoryInstructionSC, VPWidenSC, + VPWidenDivRemSC, VPWidenSelectSC, // Phi-like recipes. Need to be kept together. Index: llvm/test/Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll =================================================================== --- llvm/test/Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll +++ llvm/test/Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll @@ -11,13 +11,9 @@ ; a[i] /= b[i]; ; } -; Scalarizing the division cannot be done for scalable vectors at the moment -; when the loop needs predication -; Future implementation of llvm.vp could allow this to happen - define void @predication_in_loop(i32* %a, i32* %b, i32* %cond) #0 { ; CHECK-LABEL: @predication_in_loop -; CHECK-NOT: sdiv +; CHECK: sdiv ; entry: br label %for.body Index: llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll =================================================================== --- llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll +++ llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll @@ -701,18 +701,62 @@ define void @simple_idiv(i32* noalias %dst, i32* noalias %src, i64 %n) #0 { ; CHECK-LABEL: @simple_idiv( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1) +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 -1, [[UMAX]] +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 +; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; CHECK-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4 +; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 +; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX]], [[TMP8]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[UMAX]]) +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, i32* [[SRC:%.*]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[DST:%.*]], i64 
[[TMP9]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP10]], i32 0 +; CHECK-NEXT: [[TMP13:%.*]] = bitcast i32* [[TMP12]] to * +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i32.p0nxv4i32(* [[TMP13]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[TMP11]], i32 0 +; CHECK-NEXT: [[TMP15:%.*]] = bitcast i32* [[TMP14]] to * +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = call @llvm.masked.load.nxv4i32.p0nxv4i32(* [[TMP15]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP16:%.*]] = select [[ACTIVE_LANE_MASK]], [[WIDE_MASKED_LOAD2]], shufflevector ( insertelement ( poison, i32 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP17:%.*]] = udiv [[WIDE_MASKED_LOAD]], [[TMP16]] +; CHECK-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP14]] to * +; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0nxv4i32( [[TMP17]], * [[TMP18]], i32 4, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 4 +; CHECK-NEXT: [[INDEX_NEXT3]] = add i64 [[INDEX1]], [[TMP20]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT3]], i64 [[UMAX]]) +; CHECK-NEXT: [[TMP21:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP22:%.*]] = extractelement [[TMP21]], i32 0 +; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: br i1 true, label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] ; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, i32* [[SRC:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, i32* [[DST:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, i32* [[SRC]], i64 [[INDEX]] +; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i32, i32* [[DST]], i64 [[INDEX]] ; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[GEP1]], align 4 ; CHECK-NEXT: [[VAL2:%.*]] = load i32, i32* [[GEP2]], align 4 ; CHECK-NEXT: [[RES:%.*]] = udiv i32 [[VAL1]], [[VAL2]] ; CHECK-NEXT: store i32 [[RES]], i32* [[GEP2]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N:%.*]] -; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT:%.*]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: while.end.loopexit: ; CHECK-NEXT: ret void ; Index: llvm/test/Transforms/LoopVectorize/RISCV/scalable-divrem.ll =================================================================== --- llvm/test/Transforms/LoopVectorize/RISCV/scalable-divrem.ll +++ llvm/test/Transforms/LoopVectorize/RISCV/scalable-divrem.ll @@ -249,12 +249,43 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @predicated_udiv( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 
@llvm.vscale.i64() +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V:%.*]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP4]], align 8 +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne [[BROADCAST_SPLAT]], zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = select [[TMP5]], [[BROADCAST_SPLAT]], shufflevector ( insertelement ( poison, i64 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP7:%.*]] = udiv [[WIDE_LOAD]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = xor [[TMP5]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP5]], [[TMP7]], [[WIDE_LOAD]] +; CHECK-NEXT: store [[PREDPHI]], ptr [[TMP4]], align 8 +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V:%.*]], 0 +; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 ; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] ; CHECK: do_op: ; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] @@ -264,7 +295,7 @@ ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -294,12 +325,43 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-LABEL: @predicated_sdiv( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = 
icmp ult i64 1024, [[TMP0]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V:%.*]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP4]], align 8 +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne [[BROADCAST_SPLAT]], zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = select [[TMP5]], [[BROADCAST_SPLAT]], shufflevector ( insertelement ( poison, i64 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP7:%.*]] = sdiv [[WIDE_LOAD]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = xor [[TMP5]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP5]], [[TMP7]], [[WIDE_LOAD]] +; CHECK-NEXT: store [[PREDPHI]], ptr [[TMP4]], align 8 +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V:%.*]], 0 +; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 ; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] ; CHECK: do_op: ; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] @@ -309,7 +371,7 @@ ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -361,7 +423,7 @@ ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -382,7 +444,7 @@ ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -434,7 +496,7 @@ ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -455,7 +517,7 @@ ; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -485,10 +547,61 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; CHECK-LABEL: @predicated_sdiv_by_minus_one( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 16 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 16 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP12]], align 1 +; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP14:%.*]] = mul i32 [[TMP13]], 8 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 [[TMP14]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP15]], align 1 +; CHECK-NEXT: 
[[TMP16:%.*]] = icmp ne [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i8 -128, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP17:%.*]] = icmp ne [[WIDE_LOAD1]], shufflevector ( insertelement ( poison, i8 -128, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP18:%.*]] = select [[TMP16]], shufflevector ( insertelement ( poison, i8 -1, i32 0), poison, zeroinitializer), shufflevector ( insertelement ( poison, i8 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP19:%.*]] = sdiv [[WIDE_LOAD]], [[TMP18]] +; CHECK-NEXT: [[TMP20:%.*]] = select [[TMP17]], shufflevector ( insertelement ( poison, i8 -1, i32 0), poison, zeroinitializer), shufflevector ( insertelement ( poison, i8 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP21:%.*]] = sdiv [[WIDE_LOAD1]], [[TMP20]] +; CHECK-NEXT: [[TMP22:%.*]] = xor [[TMP16]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP23:%.*]] = xor [[TMP17]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP16]], [[TMP19]], [[WIDE_LOAD]] +; CHECK-NEXT: [[PREDPHI2:%.*]] = select [[TMP17]], [[TMP21]], [[WIDE_LOAD1]] +; CHECK-NEXT: store [[PREDPHI]], ptr [[TMP12]], align 1 +; CHECK-NEXT: [[TMP24:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP25:%.*]] = mul i32 [[TMP24]], 8 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 [[TMP25]] +; CHECK-NEXT: store [[PREDPHI2]], ptr [[TMP26]], align 1 +; CHECK-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 16 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP28]] +; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[IV]] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128 ; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] @@ -500,7 +613,7 @@ ; CHECK-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ;
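
Illustration (reviewer note, not part of the patch): the core idea of VPWidenDivRemRecipe is to replace the divisor in masked-off lanes with a value that cannot trap (1) before executing the wide udiv/sdiv/urem/srem, and to let the existing predicated-phi/blend handling discard those lanes afterwards (visible as the select feeding [[PREDPHI]] in the RISCV tests above). The standalone C++ sketch below simulates that per-lane behavior with made-up values; the lane count, array contents, and helper names are illustrative only, not LLVM code.

// Standalone sketch (illustrative values): why the safe-divisor select
// preserves the semantics of a predicated integer division.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // One "vector" of lanes plus a per-lane predicate (the block-in mask).
  std::vector<int64_t> A = {10, 7, -128, 42};
  std::vector<int64_t> B = {2, 0, -1, 6}; // unsafe divisors only in masked-off lanes
  std::vector<bool> Mask = {true, false, false, true};

  // Reference semantics: the scalar loop divides only when the lane is
  // active, so the unsafe divisors in inactive lanes are never executed.
  std::vector<int64_t> Ref = A;
  for (size_t I = 0; I < A.size(); ++I)
    if (Mask[I])
      Ref[I] = A[I] / B[I];

  // Widened form: select a safe divisor (1) for inactive lanes, divide every
  // lane unconditionally, then blend the result back under the same mask.
  // No lane can trap: each divisor is either the original one (active lane,
  // guaranteed valid by the predicate) or 1 (inactive lane).
  std::vector<int64_t> Widened(A.size());
  for (size_t I = 0; I < A.size(); ++I) {
    int64_t SafeDivisor = Mask[I] ? B[I] : 1;
    Widened[I] = A[I] / SafeDivisor;
  }
  for (size_t I = 0; I < A.size(); ++I)
    if (!Mask[I])
      Widened[I] = A[I]; // blend: inactive lanes keep the original value

  assert(Widened == Ref);
  return 0;
}

The same substitution also sidesteps the signed-overflow case (INT_MIN divided by -1), which is what @predicated_sdiv_by_minus_one above exercises: masked-off lanes divide by 1 instead of -1, while active lanes are guaranteed by the mask to avoid the overflowing combination.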