Index: llvm/include/llvm/Analysis/VectorUtils.h
===================================================================
--- llvm/include/llvm/Analysis/VectorUtils.h
+++ llvm/include/llvm/Analysis/VectorUtils.h
@@ -273,6 +273,20 @@
     return Ret;
   }
 
+  static bool hasMaskedVariant(const CallInst &CI,
+                               std::optional<ElementCount> VF = std::nullopt) {
+    // Check whether we have at least one masked vector version of a scalar
+    // function. If no VF is specified then we check for any masked variant,
+    // otherwise we look for one that matches the supplied VF.
+    auto Mappings = VFDatabase::getMappings(CI);
+    for (VFInfo Info : Mappings)
+      if (!VF || Info.Shape.VF == *VF)
+        if (Info.isMasked())
+          return true;
+
+    return false;
+  }
+
   /// Constructor, requires a CallInst instance.
   VFDatabase(CallInst &CI)
       : M(CI.getModule()), CI(CI),
Index: llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -1124,6 +1124,16 @@
     if (isa<NoAliasScopeDeclInst>(&I))
       continue;
 
+    // We can allow masked calls if there's at least one vector variant, even
+    // if we end up scalarizing due to the cost model calculations.
+    // TODO: Allow other calls if they have appropriate attributes... readonly
+    // and argmemonly?
+    if (CallInst *CI = dyn_cast<CallInst>(&I))
+      if (VFDatabase::hasMaskedVariant(*CI)) {
+        MaskedOp.insert(CI);
+        continue;
+      }
+
     // Loads are handled via masking (or speculated if safe to do so.)
     if (auto *LI = dyn_cast<LoadInst>(&I)) {
       if (!SafePtrs.count(LI->getPointerOperand()))
Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3466,6 +3466,7 @@
   Function *F = CI->getCalledFunction();
   Type *ScalarRetTy = CI->getType();
   SmallVector<Type *, 4> Tys, ScalarTys;
+  bool MaskRequired = Legal->isMaskRequired(CI);
   for (auto &ArgOp : CI->args())
     ScalarTys.push_back(ArgOp->getType());
 
@@ -3495,12 +3496,14 @@
   // If we can't emit a vector call for this function, then the currently found
   // cost is the cost we need to return.
   InstructionCost MaskCost = 0;
-  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
+  VFShape Shape = VFShape::get(*CI, VF, MaskRequired);
+  if (NeedsMask)
+    *NeedsMask = MaskRequired;
   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
   // If we want an unmasked vector function but can't find one matching the VF,
   // maybe we can find vector function that does use a mask and synthesize
   // an all-true mask.
-  if (!VecFunc) {
+  if (!VecFunc && !MaskRequired) {
     Shape = VFShape::get(*CI, VF, /*HasGlobalPred=*/true);
     VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
     // If we found one, add in the cost of creating a mask
@@ -4429,6 +4432,8 @@
   switch(I->getOpcode()) {
   default:
     return true;
+  case Instruction::Call:
+    return !VFDatabase::hasMaskedVariant(*(cast<CallInst>(I)), VF);
   case Instruction::Load:
   case Instruction::Store: {
     auto *Ptr = getLoadStorePointerOperand(I);
@@ -4492,6 +4497,8 @@
     // TODO: We can use the loop-preheader as context point here and get
     // context sensitive reasoning
     return !isSafeToSpeculativelyExecute(I);
+  case Instruction::Call:
+    return Legal->isMaskRequired(I);
   }
 }
 
@@ -8378,7 +8385,7 @@
 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
                                                    ArrayRef<VPValue *> Operands,
                                                    VFRange &Range,
-                                                   VPlanPtr &Plan) const {
+                                                   VPlanPtr &Plan) {
   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
       [this, CI](ElementCount VF) {
        return CM.isScalarWithPredication(CI, VF);
@@ -8445,10 +8452,19 @@
       Range);
   if (ShouldUseVectorCall) {
     if (NeedsMask) {
-      // If our vector variant requires a mask, then synthesize an all-true
-      // mask and insert it into the operands vector in the right place.
-      VPValue *Mask = Plan->getOrAddVPValue(ConstantInt::getTrue(
-          IntegerType::getInt1Ty(Variant->getFunctionType()->getContext())));
+      // We have 2 cases that would require a mask:
+      //   1) The block needs to be predicated, either due to a conditional
+      //      in the scalar loop or use of an active lane mask with
+      //      tail-folding, and we use the appropriate mask for the block.
+      //   2) No mask is required for the block, but the only available
+      //      vector variant at this VF requires a mask, so we synthesize an
+      //      all-true mask.
+      VPValue *Mask = nullptr;
+      if (Legal->isMaskRequired(CI))
+        Mask = createBlockInMask(CI->getParent(), *Plan);
+      else
+        Mask = Plan->getOrAddVPValue(ConstantInt::getTrue(
+            IntegerType::getInt1Ty(Variant->getFunctionType()->getContext())));
       VFShape Shape = VFShape::get(*CI, VariantVF, /*HasGlobalPred=*/true);
       unsigned MaskPos = 0;
Index: llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -95,7 +95,7 @@
   /// return a new VPWidenCallRecipe. Range.End may be decreased to ensure same
   /// decision from \p Range.Start to \p Range.End.
   VPWidenCallRecipe *tryToWidenCall(CallInst *CI, ArrayRef<VPValue *> Operands,
-                                    VFRange &Range, VPlanPtr &Plan) const;
+                                    VFRange &Range, VPlanPtr &Plan);
 
   /// Check if \p I has an opcode that can be widened and return a VPWidenRecipe
   /// if it can.
The function should only be called if the cost-model indicates Index: llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll =================================================================== --- llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll +++ llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll @@ -53,17 +53,51 @@ ; ; TFALWAYS-LABEL: @test_widen( ; TFALWAYS-NEXT: entry: +; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFALWAYS-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFALWAYS: vector.ph: +; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFALWAYS-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] +; TFALWAYS: vector.body: +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; TFALWAYS-NEXT: [[TMP9:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; TFALWAYS-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP9]], ptr [[TMP10]], i32 4, [[ACTIVE_LANE_MASK]]) +; TFALWAYS-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2 +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP12]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFALWAYS-NEXT: [[TMP13:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP14:%.*]] = extractelement [[TMP13]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; TFALWAYS: middle.block: +; TFALWAYS-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFALWAYS: scalar.ph: +; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: -; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 
[[LOAD]]) #[[ATTR1:[0-9]+]] -; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]] +; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; @@ -71,29 +105,36 @@ ; TFFALLBACK-NEXT: entry: ; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 -; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFFALLBACK-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFFALLBACK: vector.ph: -; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 -; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFFALLBACK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) ; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] ; TFFALLBACK: vector.body: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; TFFALLBACK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] -; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP4]], align 4 -; TFFALLBACK-NEXT: [[TMP5:%.*]] = call @foo_vector( [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer)) -; TFFALLBACK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; TFFALLBACK-NEXT: store [[TMP5]], ptr [[TMP6]], align 4 -; TFFALLBACK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; TFFALLBACK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 -; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] -; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; TFFALLBACK-NEXT: [[TMP9:%.*]] = call 
@foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; TFFALLBACK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP9]], ptr [[TMP10]], i32 4, [[ACTIVE_LANE_MASK]]) +; TFFALLBACK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2 +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP12]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFFALLBACK-NEXT: [[TMP13:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP14:%.*]] = extractelement [[TMP13]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TFFALLBACK: middle.block: -; TFFALLBACK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; TFFALLBACK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFFALLBACK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFFALLBACK: scalar.ph: ; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]] @@ -101,7 +142,7 @@ ; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR2:[0-9]+]] +; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]] ; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -132,67 +173,177 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-LABEL: @test_if_then( ; TFNONE-NEXT: entry: +; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFNONE: vector.ph: +; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 +; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] +; TFNONE: vector.body: +; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFNONE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP4]], align 8 +; TFNONE-NEXT: [[TMP5:%.*]] = icmp ugt [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) +; TFNONE-NEXT: [[TMP6:%.*]] = call @foo_vector( [[WIDE_LOAD]], [[TMP5]]) +; TFNONE-NEXT: [[TMP7:%.*]] = xor [[TMP5]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFNONE-NEXT: [[PREDPHI:%.*]] = select [[TMP7]], zeroinitializer, [[TMP6]] +; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: store 
[[PREDPHI]], ptr [[TMP8]], align 8 +; TFNONE-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2 +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; TFNONE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; TFNONE-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFNONE: middle.block: +; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFNONE: scalar.ph: +; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: -; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP12]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFNONE: if.then: -; TFNONE-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR2]] +; TFNONE-NEXT: [[TMP13:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR2]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: -; TFNONE-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] -; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: store i64 [[TMP2]], ptr [[ARRAYIDX1]], align 8 +; TFNONE-NEXT: [[TMP14:%.*]] = phi i64 [ [[TMP13]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] +; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: store i64 [[TMP14]], ptr [[ARRAYIDX1]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFALWAYS-LABEL: @test_if_then( ; TFALWAYS-NEXT: entry: +; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFALWAYS-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFALWAYS: vector.ph: +; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFALWAYS-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFALWAYS-NEXT: br 
label [[VECTOR_BODY:%.*]] +; TFALWAYS: vector.body: +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFALWAYS-NEXT: [[TMP9:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP10:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], zeroinitializer +; TFALWAYS-NEXT: [[TMP11:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP10]]) +; TFALWAYS-NEXT: [[TMP12:%.*]] = xor [[TMP9]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP13:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP12]], zeroinitializer +; TFALWAYS-NEXT: [[PREDPHI:%.*]] = select [[TMP13]], zeroinitializer, [[TMP11]] +; TFALWAYS-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP15:%.*]] = or [[TMP10]], [[TMP13]] +; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP14]], i32 8, [[TMP15]]) +; TFALWAYS-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 2 +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP17]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFALWAYS-NEXT: [[TMP18:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP19:%.*]] = extractelement [[TMP18]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFALWAYS: middle.block: +; TFALWAYS-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFALWAYS: scalar.ph: +; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: -; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[TMP20:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP20]], 50 ; TFALWAYS-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFALWAYS: if.then: -; TFALWAYS-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR1]] +; TFALWAYS-NEXT: [[TMP21:%.*]] = call i64 @foo(i64 [[TMP20]]) #[[ATTR4]] ; TFALWAYS-NEXT: br label [[IF_END]] ; TFALWAYS: if.end: -; TFALWAYS-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] -; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: store i64 [[TMP2]], ptr 
[[ARRAYIDX1]], align 8 +; TFALWAYS-NEXT: [[TMP22:%.*]] = phi i64 [ [[TMP21]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] +; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: store i64 [[TMP22]], ptr [[ARRAYIDX1]], align 8 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; ; TFFALLBACK-LABEL: @test_if_then( ; TFFALLBACK-NEXT: entry: +; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFFALLBACK-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFFALLBACK: vector.ph: +; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFFALLBACK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] +; TFFALLBACK: vector.body: +; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP10:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], zeroinitializer +; TFFALLBACK-NEXT: [[TMP11:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP10]]) +; TFFALLBACK-NEXT: [[TMP12:%.*]] = xor [[TMP9]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP13:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP12]], zeroinitializer +; TFFALLBACK-NEXT: [[PREDPHI:%.*]] = select [[TMP13]], zeroinitializer, [[TMP11]] +; TFFALLBACK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[TMP15:%.*]] = or [[TMP10]], [[TMP13]] +; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP14]], i32 8, [[TMP15]]) +; TFFALLBACK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 2 +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP17]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFFALLBACK-NEXT: [[TMP18:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; 
TFFALLBACK-NEXT: [[TMP19:%.*]] = extractelement [[TMP18]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFFALLBACK: middle.block: +; TFFALLBACK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFFALLBACK: scalar.ph: +; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]] ; TFFALLBACK: for.body: -; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] +; TFFALLBACK-NEXT: [[TMP20:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP20]], 50 ; TFFALLBACK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFFALLBACK: if.then: -; TFFALLBACK-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR2]] +; TFFALLBACK-NEXT: [[TMP21:%.*]] = call i64 @foo(i64 [[TMP20]]) #[[ATTR4]] ; TFFALLBACK-NEXT: br label [[IF_END]] ; TFFALLBACK: if.end: -; TFFALLBACK-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] -; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: store i64 [[TMP2]], ptr [[ARRAYIDX1]], align 8 +; TFFALLBACK-NEXT: [[TMP22:%.*]] = phi i64 [ [[TMP21]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] +; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] +; TFFALLBACK-NEXT: store i64 [[TMP22]], ptr [[ARRAYIDX1]], align 8 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret void ; @@ -229,76 +380,189 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-LABEL: @test_widen_if_then_else( ; TFNONE-NEXT: entry: +; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFNONE: vector.ph: +; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 +; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] +; TFNONE: vector.body: +; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFNONE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP4]], align 8 +; TFNONE-NEXT: [[TMP5:%.*]] = icmp ugt [[WIDE_LOAD]], shufflevector ( insertelement ( poison, 
i64 50, i64 0), poison, zeroinitializer) +; TFNONE-NEXT: [[TMP6:%.*]] = xor [[TMP5]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFNONE-NEXT: [[TMP7:%.*]] = call @foo_vector( zeroinitializer, [[TMP6]]) +; TFNONE-NEXT: [[TMP8:%.*]] = call @foo_vector( [[WIDE_LOAD]], [[TMP5]]) +; TFNONE-NEXT: [[PREDPHI:%.*]] = select [[TMP6]], [[TMP7]], [[TMP8]] +; TFNONE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: store [[PREDPHI]], ptr [[TMP9]], align 8 +; TFNONE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2 +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]] +; TFNONE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; TFNONE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TFNONE: middle.block: +; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFNONE: scalar.ph: +; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: -; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: [[TMP13:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP13]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; TFNONE: if.then: -; TFNONE-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR3:[0-9]+]] +; TFNONE-NEXT: [[TMP14:%.*]] = call i64 @foo(i64 [[TMP13]]) #[[ATTR3:[0-9]+]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.else: -; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR3]] +; TFNONE-NEXT: [[TMP15:%.*]] = call i64 @foo(i64 0) #[[ATTR3]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: -; TFNONE-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ] -; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: store i64 [[TMP3]], ptr [[ARRAYIDX1]], align 8 +; TFNONE-NEXT: [[TMP16:%.*]] = phi i64 [ [[TMP14]], [[IF_THEN]] ], [ [[TMP15]], [[IF_ELSE]] ] +; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: store i64 [[TMP16]], ptr [[ARRAYIDX1]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFALWAYS-LABEL: @test_widen_if_then_else( ; TFALWAYS-NEXT: entry: +; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, 
[[TMP1]] +; TFALWAYS-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFALWAYS: vector.ph: +; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFALWAYS-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] +; TFALWAYS: vector.body: +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFALWAYS-NEXT: [[TMP9:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP10:%.*]] = xor [[TMP9]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP11:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP10]], zeroinitializer +; TFALWAYS-NEXT: [[TMP12:%.*]] = call @foo_vector( zeroinitializer, [[TMP11]]) +; TFALWAYS-NEXT: [[TMP13:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], zeroinitializer +; TFALWAYS-NEXT: [[TMP14:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP13]]) +; TFALWAYS-NEXT: [[PREDPHI:%.*]] = select [[TMP11]], [[TMP12]], [[TMP14]] +; TFALWAYS-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP16:%.*]] = or [[TMP11]], [[TMP13]] +; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP15]], i32 8, [[TMP16]]) +; TFALWAYS-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 2 +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP18]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFALWAYS-NEXT: [[TMP19:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP20:%.*]] = extractelement [[TMP19]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TFALWAYS: middle.block: +; TFALWAYS-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFALWAYS: scalar.ph: +; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: -; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], 
[[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[TMP21:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP21]], 50 ; TFALWAYS-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; TFALWAYS: if.then: -; TFALWAYS-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR2:[0-9]+]] +; TFALWAYS-NEXT: [[TMP22:%.*]] = call i64 @foo(i64 [[TMP21]]) #[[ATTR5:[0-9]+]] ; TFALWAYS-NEXT: br label [[IF_END]] ; TFALWAYS: if.else: -; TFALWAYS-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR2]] +; TFALWAYS-NEXT: [[TMP23:%.*]] = call i64 @foo(i64 0) #[[ATTR5]] ; TFALWAYS-NEXT: br label [[IF_END]] ; TFALWAYS: if.end: -; TFALWAYS-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ] -; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: store i64 [[TMP3]], ptr [[ARRAYIDX1]], align 8 +; TFALWAYS-NEXT: [[TMP24:%.*]] = phi i64 [ [[TMP22]], [[IF_THEN]] ], [ [[TMP23]], [[IF_ELSE]] ] +; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: store i64 [[TMP24]], ptr [[ARRAYIDX1]], align 8 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; ; TFFALLBACK-LABEL: @test_widen_if_then_else( ; TFFALLBACK-NEXT: entry: +; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFFALLBACK-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFFALLBACK: vector.ph: +; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFFALLBACK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] +; TFFALLBACK: vector.body: +; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP10:%.*]] = xor [[TMP9]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP11:%.*]] = 
select [[ACTIVE_LANE_MASK]], [[TMP10]], zeroinitializer +; TFFALLBACK-NEXT: [[TMP12:%.*]] = call @foo_vector( zeroinitializer, [[TMP11]]) +; TFFALLBACK-NEXT: [[TMP13:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], zeroinitializer +; TFFALLBACK-NEXT: [[TMP14:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP13]]) +; TFFALLBACK-NEXT: [[PREDPHI:%.*]] = select [[TMP11]], [[TMP12]], [[TMP14]] +; TFFALLBACK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[TMP16:%.*]] = or [[TMP11]], [[TMP13]] +; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP15]], i32 8, [[TMP16]]) +; TFFALLBACK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 2 +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP18]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFFALLBACK-NEXT: [[TMP19:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP20:%.*]] = extractelement [[TMP19]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TFFALLBACK: middle.block: +; TFFALLBACK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFFALLBACK: scalar.ph: +; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]] ; TFFALLBACK: for.body: -; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] +; TFFALLBACK-NEXT: [[TMP21:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP21]], 50 ; TFFALLBACK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; TFFALLBACK: if.then: -; TFFALLBACK-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR3:[0-9]+]] +; TFFALLBACK-NEXT: [[TMP22:%.*]] = call i64 @foo(i64 [[TMP21]]) #[[ATTR5:[0-9]+]] ; TFFALLBACK-NEXT: br label [[IF_END]] ; TFFALLBACK: if.else: -; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR3]] +; TFFALLBACK-NEXT: [[TMP23:%.*]] = call i64 @foo(i64 0) #[[ATTR5]] ; TFFALLBACK-NEXT: br label [[IF_END]] ; TFFALLBACK: if.end: -; TFFALLBACK-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ] -; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: store i64 [[TMP3]], ptr [[ARRAYIDX1]], align 8 +; TFFALLBACK-NEXT: [[TMP24:%.*]] = phi i64 [ [[TMP22]], [[IF_THEN]] ], [ [[TMP23]], [[IF_ELSE]] ] +; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] +; TFFALLBACK-NEXT: store i64 [[TMP24]], ptr [[ARRAYIDX1]], align 8 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFFALLBACK-NEXT: br i1 
[[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret void ; @@ -359,7 +623,7 @@ ; TFNONE-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TFNONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFNONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] @@ -375,7 +639,7 @@ ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; @@ -386,7 +650,7 @@ ; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR3:[0-9]+]] +; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] ; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -418,7 +682,7 @@ ; TFFALLBACK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TFFALLBACK: middle.block: ; TFFALLBACK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; TFFALLBACK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] @@ -429,12 +693,12 @@ ; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]] +; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] ; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TFFALLBACK-NEXT: br i1 
[[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret void ; @@ -483,7 +747,7 @@ ; TFNONE-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TFNONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TFNONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] @@ -499,23 +763,57 @@ ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFALWAYS-LABEL: @test_widen_optmask( ; TFALWAYS-NEXT: entry: +; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFALWAYS-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFALWAYS: vector.ph: +; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFALWAYS-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] +; TFALWAYS: vector.body: +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; TFALWAYS-NEXT: [[TMP9:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; TFALWAYS-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP9]], ptr [[TMP10]], i32 4, [[ACTIVE_LANE_MASK]]) +; TFALWAYS-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2 +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP12]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFALWAYS-NEXT: [[TMP13:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP14:%.*]] = extractelement [[TMP13]], i32 0 +; TFALWAYS-NEXT: 
br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; TFALWAYS: middle.block: +; TFALWAYS-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFALWAYS: scalar.ph: +; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: -; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]] -; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7:[0-9]+]] +; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; @@ -523,29 +821,36 @@ ; TFFALLBACK-NEXT: entry: ; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 -; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFFALLBACK-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFFALLBACK: vector.ph: -; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 -; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFFALLBACK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) ; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] ; TFFALLBACK: vector.body: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; TFFALLBACK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] -; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP4]], align 4 -; TFFALLBACK-NEXT: [[TMP5:%.*]] = call @foo_vector_nomask( [[WIDE_LOAD]]) -; TFFALLBACK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; 
TFFALLBACK-NEXT: store [[TMP5]], ptr [[TMP6]], align 4 -; TFFALLBACK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; TFFALLBACK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 -; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] -; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP8]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; TFFALLBACK-NEXT: [[TMP9:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; TFFALLBACK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP9]], ptr [[TMP10]], i32 4, [[ACTIVE_LANE_MASK]]) +; TFFALLBACK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2 +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP12]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFFALLBACK-NEXT: [[TMP13:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP14:%.*]] = extractelement [[TMP13]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TFFALLBACK: middle.block: -; TFFALLBACK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; TFFALLBACK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFFALLBACK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFFALLBACK: scalar.ph: ; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]] @@ -553,12 +858,12 @@ ; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] +; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7:[0-9]+]] ; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret void ; Index: llvm/test/Transforms/LoopVectorize/scalarize-masked-call.ll =================================================================== --- llvm/test/Transforms/LoopVectorize/scalarize-masked-call.ll +++ llvm/test/Transforms/LoopVectorize/scalarize-masked-call.ll @@ -6,23 +6,63 @@ define void 
@cond_call(ptr readonly %src, ptr noalias %dest, i64 %N) { ; CHECK-LABEL: @cond_call( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 2 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_CALL_CONTINUE2:%.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[SRC:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP0]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i64> [[WIDE_LOAD]], +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_CALL_IF:%.*]], label [[PRED_CALL_CONTINUE:%.*]] +; CHECK: pred.call.if: +; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0 +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @foo(i64 [[TMP3]]) #[[ATTR0:[0-9]+]] +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i32 0 +; CHECK-NEXT: br label [[PRED_CALL_CONTINUE]] +; CHECK: pred.call.continue: +; CHECK-NEXT: [[TMP6:%.*]] = phi <2 x i64> [ poison, [[VECTOR_BODY]] ], [ [[TMP5]], [[PRED_CALL_IF]] ] +; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_CALL_IF1:%.*]], label [[PRED_CALL_CONTINUE2]] +; CHECK: pred.call.if1: +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1 +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @foo(i64 [[TMP8]]) #[[ATTR0]] +; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[TMP9]], i32 1 +; CHECK-NEXT: br label [[PRED_CALL_CONTINUE2]] +; CHECK: pred.call.continue2: +; CHECK-NEXT: [[TMP11:%.*]] = phi <2 x i64> [ [[TMP6]], [[PRED_CALL_CONTINUE]] ], [ [[TMP10]], [[PRED_CALL_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x i64> [[TMP11]], <2 x i64> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[DEST:%.*]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i64> [[PREDPHI]], ptr [[TMP12]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_LOOP:%.*]] ] -; CHECK-NEXT: [[LD_ADDR:%.*]] = getelementptr inbounds i64, ptr [[SRC:%.*]], i64 [[IV]] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_LOOP:%.*]] ] +; CHECK-NEXT: [[LD_ADDR:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] ; CHECK-NEXT: [[LD_VALUE:%.*]] = load i64, ptr [[LD_ADDR]], align 8 ; CHECK-NEXT: [[IFCOND:%.*]] = icmp ult i64 [[LD_VALUE]], 5 ; CHECK-NEXT: br i1 [[IFCOND]], label [[IF_THEN:%.*]], label [[FOR_LOOP]] ; CHECK: if.then: -; CHECK-NEXT: [[FOO_RET:%.*]] = call i64 @foo(i64 [[LD_VALUE]]) +; CHECK-NEXT: [[FOO_RET:%.*]] = call 
i64 @foo(i64 [[LD_VALUE]]) #[[ATTR0]] ; CHECK-NEXT: br label [[FOR_LOOP]] ; CHECK: for.loop: ; CHECK-NEXT: [[ST_VALUE:%.*]] = phi i64 [ [[LD_VALUE]], [[FOR_BODY]] ], [ [[FOO_RET]], [[IF_THEN]] ] -; CHECK-NEXT: [[ST_ADDR:%.*]] = getelementptr inbounds i64, ptr [[DEST:%.*]], i64 [[IV]] +; CHECK-NEXT: [[ST_ADDR:%.*]] = getelementptr inbounds i64, ptr [[DEST]], i64 [[IV]] ; CHECK-NEXT: store i64 [[ST_VALUE]], ptr [[ST_ADDR]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[LOOPCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; CHECK-NEXT: br i1 [[LOOPCOND]], label [[END:%.*]], label [[FOR_BODY]] +; CHECK-NEXT: [[LOOPCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[LOOPCOND]], label [[END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: end: ; CHECK-NEXT: ret void ; @@ -37,7 +77,7 @@ br i1 %ifcond, label %if.then, label %for.loop if.then: - %foo.ret = call i64 @foo(i64 %ld.value) + %foo.ret = call i64 @foo(i64 %ld.value) #0 br label %for.loop for.loop: @@ -52,8 +92,8 @@ ret void } -declare i64 @foo(i64) -declare <4 x i64> @vector_foo(<4 x i64>) +declare i64 @foo(i64) #0 +declare <4 x i64> @vector_foo(<4 x i64>, <4 x i1>) ; We need a vector variant in order to allow for vectorization at present, but ; we want to test scalarization of conditional calls. If we provide a variant
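For reference, the tests above depend on the VFABI variant mapping that VFDatabase::hasMaskedVariant queries: the call site carries a "vector-function-abi-variant" attribute naming a vector variant whose trailing parameter is the predicate. The following is a minimal sketch of such a mapping, assuming SVE-style scalable types as in masked-call.ll; the function names and the exact mangled string are illustrative rather than taken from the tests in this patch.

; Scalar function plus a masked, scalable vector variant; the trailing
; <vscale x 2 x i1> parameter is the mask that makes VFInfo::isMasked() true.
declare i64 @foo(i64)
declare <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64>, <vscale x 2 x i1>)

define void @caller(ptr %p) {
entry:
  %v = load i64, ptr %p, align 8
  ; The mapping is attached at the call site, so VFDatabase::getMappings(CI)
  ; can decode it and report a masked VFInfo for this call.
  %r = call i64 @foo(i64 %v) #0
  store i64 %r, ptr %p, align 8
  ret void
}

; _ZGVsMxv: SVE ISA token ('s'), Masked ('M'), scalable VF ('x'), one vector
; argument ('v'), mapping @foo onto @foo_vector.
attributes #0 = { "vector-function-abi-variant"="_ZGVsMxv_foo(foo_vector)" }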