Index: llvm/include/llvm/Analysis/VectorUtils.h
===================================================================
--- llvm/include/llvm/Analysis/VectorUtils.h
+++ llvm/include/llvm/Analysis/VectorUtils.h
@@ -277,6 +277,22 @@
     return Ret;
   }
 
+  static bool hasMaskedVariant(const CallInst &CI,
+                               Optional<ElementCount> VF = None) {
+    // Check whether we have at least one masked vector version of a scalar
+    // function.
+    bool HasMaskedVersion = false;
+
+    // If no VF is specified then we check for any masked variant, otherwise
+    // we look for one that matches the supplied VF.
+    auto Mappings = VFDatabase::getMappings(CI);
+    for (VFInfo Info : Mappings)
+      if (!VF.has_value() || Info.Shape.VF == VF.value())
+        HasMaskedVersion |= Info.isMasked();
+
+    return HasMaskedVersion;
+  }
+
   /// Constructor, requires a CallInst instance.
   VFDatabase(CallInst &CI)
       : M(CI.getModule()), CI(CI),
Index: llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -1128,20 +1128,11 @@
     // if we end up scalarizing due to the cost model calculations.
     // TODO: Allow other calls if they have appropriate attributes... readonly
    // and argmemonly?
-    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
-      // Check whether we have at least one masked vector version of a scalar
-      // function.
-      bool HasMaskedVersion = false;
-
-      auto Mappings = VFDatabase::getMappings(*CI);
-      for (VFInfo Info : Mappings)
-        HasMaskedVersion |= Info.isMasked();
-
-      if (HasMaskedVersion) {
+    if (CallInst *CI = dyn_cast<CallInst>(&I))
+      if (VFDatabase::hasMaskedVariant(*CI)) {
         MaskedOp.insert(CI);
         continue;
       }
-    }
 
     // Loads are handled via masking (or speculated if safe to do so.)
     if (auto *LI = dyn_cast<LoadInst>(&I)) {
Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3469,12 +3469,13 @@
   // cost is the cost we need to return.
   NeedToScalarize = true;
   InstructionCost MaskCost = 0;
-  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
+  VFShape Shape = VFShape::get(*CI, VF, Legal->isMaskRequired(CI));
   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
   // If we want an unmasked vector function but can't find one matching the VF,
   // and the target supports an active lane mask, maybe we can find vector
   // function that does use a mask and synthesize an all-true mask.
-  if (!VecFunc && TTI.emitGetActiveLaneMask() != PredicationStyle::None) {
+  if (!VecFunc && !Legal->isMaskRequired(CI) &&
+      TTI.emitGetActiveLaneMask() != PredicationStyle::None) {
     Shape = VFShape::get(*CI, VF, /*HasGlobalPred=*/true);
     VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
     // If we found one, add in the cost of creating a mask
@@ -3490,9 +3491,7 @@
                                             VF));
   }
 
-  // We don't support masked function calls yet, but we can scalarize a
-  // masked call with branches.
-  if (!TLI || CI->isNoBuiltin() || !VecFunc || Legal->isMaskRequired(CI))
+  if (!TLI || CI->isNoBuiltin() || !VecFunc)
     return Cost;
 
   // If the corresponding vector cost is cheaper, return its cost.
@@ -4408,6 +4407,8 @@
   switch(I->getOpcode()) {
   default:
     return true;
+  case Instruction::Call:
+    return !VFDatabase::hasMaskedVariant(*(cast<CallInst>(I)), VF);
   case Instruction::Load:
   case Instruction::Store: {
     auto *Ptr = getLoadStorePointerOperand(I);
@@ -8320,7 +8321,8 @@
 
 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
                                                    ArrayRef<VPValue *> Operands,
-                                                   VFRange &Range) const {
+                                                   VFRange &Range,
+                                                   VPlanPtr &Plan) {
 
   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
       [this, CI](ElementCount VF) {
@@ -8331,6 +8333,10 @@
   if (IsPredicated)
     return nullptr;
 
+  VPValue *Mask = nullptr;
+  if (Legal->isMaskRequired(CI))
+    Mask = createBlockInMask(CI->getParent(), Plan);
+
   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
@@ -8371,7 +8377,7 @@
       Range);
   if (ShouldUseVectorCall)
     return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()),
-                                 Intrinsic::not_intrinsic);
+                                 Intrinsic::not_intrinsic, Mask);
   return nullptr;
 }
 
@@ -8641,7 +8647,7 @@
     return nullptr;
 
   if (auto *CI = dyn_cast<CallInst>(Instr))
-    return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
+    return toVPRecipeResult(tryToWidenCall(CI, Operands, Range, Plan));
 
   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
Index: llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -95,7 +95,7 @@
   /// return a new VPWidenCallRecipe. Range.End may be decreased to ensure same
   /// decision from \p Range.Start to \p Range.End.
   VPWidenCallRecipe *tryToWidenCall(CallInst *CI, ArrayRef<VPValue *> Operands,
-                                    VFRange &Range) const;
+                                    VFRange &Range, VPlanPtr &Plan);
 
   /// Check if \p I has an opcode that can be widened and return a VPWidenRecipe
   /// if it can. The function should only be called if the cost-model indicates
Index: llvm/lib/Transforms/Vectorize/VPlan.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlan.h
+++ llvm/lib/Transforms/Vectorize/VPlan.h
@@ -953,14 +953,18 @@
   /// ID of the vector intrinsic to call when widening the call. If set the
   /// Intrinsic::not_intrinsic, a library call will be used instead.
   Intrinsic::ID VectorIntrinsicID;
+  bool MaskRequired;
 
 public:
   template <typename IterT>
   VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments,
-                    Intrinsic::ID VectorIntrinsicID)
+                    Intrinsic::ID VectorIntrinsicID, VPValue *MaskVal = nullptr)
       : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
         VPValue(VPValue::VPVWidenCallSC, &I, this),
-        VectorIntrinsicID(VectorIntrinsicID) {}
+        VectorIntrinsicID(VectorIntrinsicID), MaskRequired(MaskVal != nullptr) {
+    if (MaskVal)
+      addOperand(MaskVal);
+  }
 
   ~VPWidenCallRecipe() override = default;
Index: llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -440,6 +440,13 @@
          "DbgInfoIntrinsic should have been dropped during VPlan construction");
   State.setDebugLocFromInst(&CI);
 
+  // If we added a mask operand in the recipe, extract it so that we can
+  // insert it in the right position for the vectorized call. The mask isn't
+  // guaranteed to be the last argument.
+  VPValue *VPMask = nullptr;
+  if (MaskRequired)
+    VPMask = removeAndReturnLastOperand();
+
   for (unsigned Part = 0; Part < State.UF; ++Part) {
     SmallVector<Type *, 2> TysForDecl = {CI.getType()};
     SmallVector<Value *, 4> Args;
@@ -470,13 +477,13 @@
       assert(VectorF && "Can't retrieve vector intrinsic.");
     } else {
       // Use vector version of the function call.
-      VFShape Shape = VFShape::get(CI, State.VF, /*HasGlobalPred=*/false);
+      VFShape Shape = VFShape::get(CI, State.VF, MaskRequired);
       VectorF = VFDatabase(CI).getVectorizedFunction(Shape);
 
       // TODO: Do we need TTI checks for masking here? Or can we
       //       assume it works by this point? Maybe add to the recipe...
-      if (!VectorF) {
+      if (!VectorF && !MaskRequired) {
         Shape = VFShape::get(CI, State.VF, /*HasGlobalPred=*/true);
         VectorF = VFDatabase(CI).getVectorizedFunction(Shape);
       }
@@ -494,12 +501,19 @@
       }
     }
 
+    assert((!MaskRequired || VectorFTakesMask) &&
+           "Mask supplied for function with no mask argument");
+
     // If the function takes a mask parameter, we need to synthesize one
     // that's true for all lanes.
     if (VectorFTakesMask) {
-      Value *Mask = ConstantInt::getTrue(VectorType::get(
-          IntegerType::getInt1Ty(VectorF->getFunctionType()->getContext()),
-          State.VF));
+      Value *Mask = nullptr;
+      if (VPMask)
+        Mask = State.get(VPMask, Part);
+      else
+        Mask = ConstantInt::getTrue(VectorType::get(
+            IntegerType::getInt1Ty(VectorF->getFunctionType()->getContext()),
+            State.VF));
       Args.insert(Args.begin() + VectorFMaskPos, Mask);
     }
Index: llvm/lib/Transforms/Vectorize/VPlanValue.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlanValue.h
+++ llvm/lib/Transforms/Vectorize/VPlanValue.h
@@ -273,6 +273,12 @@
     Op->removeUser(*this);
   }
 
+  VPValue *removeAndReturnLastOperand() {
+    VPValue *Op = Operands.pop_back_val();
+    Op->removeUser(*this);
+    return Op;
+  }
+
   typedef SmallVectorImpl<VPValue *>::iterator operand_iterator;
   typedef SmallVectorImpl<VPValue *>::const_iterator const_operand_iterator;
   typedef iterator_range<operand_iterator> operand_range;
Index: llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -3,6 +3,7 @@
 ; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -instsimplify -S | FileCheck %s --check-prefixes=TFALWAYS
 ; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -instsimplify -S | FileCheck %s --check-prefixes=TFFALLBACK
 
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
 ; A call whose argument must be widened.
We check that tail folding uses the @@ -24,11 +25,11 @@ ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to * -; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP5]], align 4 +; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP5]], align 8 ; TFNONE-NEXT: [[TMP6:%.*]] = call @foo_vector( [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) ; TFNONE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[TMP8:%.*]] = bitcast i64* [[TMP7]] to * -; TFNONE-NEXT: store [[TMP6]], * [[TMP8]], align 4 +; TFNONE-NEXT: store [[TMP6]], * [[TMP8]], align 8 ; TFNONE-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] @@ -43,10 +44,10 @@ ; TFNONE: for.body: ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4 +; TFNONE-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8 ; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR2:[0-9]+]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4 +; TFNONE-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 ; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]] @@ -55,53 +56,50 @@ ; ; TFALWAYS-LABEL: @test_widen( ; TFALWAYS-NEXT: entry: -; TFALWAYS-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFALWAYS-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFALWAYS: vector.ph: +; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFALWAYS-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) ; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] ; TFALWAYS: vector.body: -; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_CALL_CONTINUE2:%.*]] ] -; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ , [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_CALL_CONTINUE2]] ] -; TFALWAYS-NEXT: [[TMP0:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]] -; TFALWAYS-NEXT: [[TMP1:%.*]] = bitcast i64* [[TMP0]] to <2 x i64>* -; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* [[TMP1]], i32 4, <2 x i1> 
[[ACTIVE_LANE_MASK]], <2 x i64> poison) -; TFALWAYS-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0 -; TFALWAYS-NEXT: br i1 [[TMP2]], label [[PRED_CALL_IF:%.*]], label [[PRED_CALL_CONTINUE:%.*]] -; TFALWAYS: pred.call.if: -; TFALWAYS-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 0 -; TFALWAYS-NEXT: [[TMP4:%.*]] = call i64 @foo(i64 [[TMP3]]) #[[ATTR4:[0-9]+]] -; TFALWAYS-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i32 0 -; TFALWAYS-NEXT: br label [[PRED_CALL_CONTINUE]] -; TFALWAYS: pred.call.continue: -; TFALWAYS-NEXT: [[TMP6:%.*]] = phi <2 x i64> [ poison, [[VECTOR_BODY]] ], [ [[TMP5]], [[PRED_CALL_IF]] ] -; TFALWAYS-NEXT: [[TMP7:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1 -; TFALWAYS-NEXT: br i1 [[TMP7]], label [[PRED_CALL_IF1:%.*]], label [[PRED_CALL_CONTINUE2]] -; TFALWAYS: pred.call.if1: -; TFALWAYS-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 1 -; TFALWAYS-NEXT: [[TMP9:%.*]] = call i64 @foo(i64 [[TMP8]]) #[[ATTR4]] -; TFALWAYS-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[TMP9]], i32 1 -; TFALWAYS-NEXT: br label [[PRED_CALL_CONTINUE2]] -; TFALWAYS: pred.call.continue2: -; TFALWAYS-NEXT: [[TMP11:%.*]] = phi <2 x i64> [ [[TMP6]], [[PRED_CALL_CONTINUE]] ], [ [[TMP10]], [[PRED_CALL_IF1]] ] -; TFALWAYS-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] -; TFALWAYS-NEXT: [[TMP13:%.*]] = bitcast i64* [[TMP12]] to <2 x i64>* -; TFALWAYS-NEXT: call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> [[TMP11]], <2 x i64>* [[TMP13]], i32 4, <2 x i1> [[ACTIVE_LANE_MASK]]) -; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 -; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX_NEXT]], i64 1024) -; TFALWAYS-NEXT: [[TMP14:%.*]] = xor <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], -; TFALWAYS-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP14]], i32 0 -; TFALWAYS-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[TMP8:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP9:%.*]] = bitcast i64* [[TMP8]] to * +; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0nxv2i64(* [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFALWAYS-NEXT: [[TMP10:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; TFALWAYS-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP12:%.*]] = bitcast i64* [[TMP11]] to * +; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0nxv2i64( [[TMP10]], * [[TMP12]], i32 8, [[ACTIVE_LANE_MASK]]) +; TFALWAYS-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 2 +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP14]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFALWAYS-NEXT: [[TMP15:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP16:%.*]] = extractelement [[TMP15]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP16]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TFALWAYS: middle.block: ; TFALWAYS-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFALWAYS: scalar.ph: -; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: ; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4 -; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4]] +; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8 +; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]] ; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4 +; TFALWAYS-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 8 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 ; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]] @@ -112,59 +110,48 @@ ; TFFALLBACK-NEXT: entry: ; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 -; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFFALLBACK-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFFALLBACK: vector.ph: -; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 -; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFFALLBACK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) ; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] ; TFFALLBACK: vector.body: -; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_CALL_CONTINUE2:%.*]] ] -; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ , [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_CALL_CONTINUE2]] ] -; TFFALLBACK-NEXT: [[TMP0:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]] -; TFFALLBACK-NEXT: [[TMP1:%.*]] = bitcast i64* [[TMP0]] to <2 x i64>* -; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* [[TMP1]], i32 4, <2 x i1> [[ACTIVE_LANE_MASK]], <2 x i64> poison) -; TFFALLBACK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0 -; 
TFFALLBACK-NEXT: br i1 [[TMP2]], label [[PRED_CALL_IF:%.*]], label [[PRED_CALL_CONTINUE:%.*]] -; TFFALLBACK: pred.call.if: -; TFFALLBACK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 0 -; TFFALLBACK-NEXT: [[TMP4:%.*]] = call i64 @foo(i64 [[TMP3]]) #[[ATTR4:[0-9]+]] -; TFFALLBACK-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i32 0 -; TFFALLBACK-NEXT: br label [[PRED_CALL_CONTINUE]] -; TFFALLBACK: pred.call.continue: -; TFFALLBACK-NEXT: [[TMP6:%.*]] = phi <2 x i64> [ poison, [[VECTOR_BODY]] ], [ [[TMP5]], [[PRED_CALL_IF]] ] -; TFFALLBACK-NEXT: [[TMP7:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1 -; TFFALLBACK-NEXT: br i1 [[TMP7]], label [[PRED_CALL_IF1:%.*]], label [[PRED_CALL_CONTINUE2]] -; TFFALLBACK: pred.call.if1: -; TFFALLBACK-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 1 -; TFFALLBACK-NEXT: [[TMP9:%.*]] = call i64 @foo(i64 [[TMP8]]) #[[ATTR4]] -; TFFALLBACK-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[TMP9]], i32 1 -; TFFALLBACK-NEXT: br label [[PRED_CALL_CONTINUE2]] -; TFFALLBACK: pred.call.continue2: -; TFFALLBACK-NEXT: [[TMP11:%.*]] = phi <2 x i64> [ [[TMP6]], [[PRED_CALL_CONTINUE]] ], [ [[TMP10]], [[PRED_CALL_IF1]] ] -; TFFALLBACK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] -; TFFALLBACK-NEXT: [[TMP13:%.*]] = bitcast i64* [[TMP12]] to <2 x i64>* -; TFFALLBACK-NEXT: call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> [[TMP11]], <2 x i64>* [[TMP13]], i32 4, <2 x i1> [[ACTIVE_LANE_MASK]]) -; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 -; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX_NEXT]], i64 1024) -; TFFALLBACK-NEXT: [[TMP14:%.*]] = xor <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], -; TFFALLBACK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP14]], i32 0 -; TFFALLBACK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[TMP9:%.*]] = bitcast i64* [[TMP8]] to * +; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0nxv2i64(* [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFFALLBACK-NEXT: [[TMP10:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; TFFALLBACK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[TMP12:%.*]] = bitcast i64* [[TMP11]] to * +; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0nxv2i64( [[TMP10]], * [[TMP12]], i32 8, [[ACTIVE_LANE_MASK]]) +; TFFALLBACK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 2 +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP14]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFFALLBACK-NEXT: [[TMP15:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP16:%.*]] = extractelement [[TMP15]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP0:![0-9]+]] ; TFFALLBACK: middle.block: -; TFFALLBACK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; TFFALLBACK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFFALLBACK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFFALLBACK: scalar.ph: ; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]] ; TFFALLBACK: for.body: ; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4 -; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4]] +; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8 +; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]] ; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4 +; TFFALLBACK-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 8 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 ; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]] @@ -193,67 +180,183 @@ define void @test_if_then(i64* noalias %a, i64* readnone %b) #4 { ; TFNONE-LABEL: @test_if_then( ; TFNONE-NEXT: entry: +; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFNONE: vector.ph: +; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 +; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] +; TFNONE: vector.body: +; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFNONE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to * +; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP5]], align 8 +; TFNONE-NEXT: [[TMP6:%.*]] = icmp ugt [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i64 50, i32 0), poison, zeroinitializer) +; TFNONE-NEXT: [[TMP7:%.*]] = call @foo_vector( [[WIDE_LOAD]], [[TMP6]]) +; TFNONE-NEXT: [[TMP8:%.*]] = xor [[TMP6]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFNONE-NEXT: [[PREDPHI:%.*]] = select [[TMP8]], zeroinitializer, [[TMP7]] +; TFNONE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: [[TMP10:%.*]] = bitcast i64* [[TMP9]] to * +; TFNONE-NEXT: store [[PREDPHI]], * [[TMP10]], align 8 +; TFNONE-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2 +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; TFNONE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; TFNONE-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFNONE: middle.block: +; TFNONE-NEXT: 
[[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFNONE: scalar.ph: +; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: -; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 -; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: [[TMP14:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 +; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP14]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFNONE: if.then: -; TFNONE-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR2]] +; TFNONE-NEXT: [[TMP15:%.*]] = call i64 @foo(i64 [[TMP14]]) #[[ATTR2]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: -; TFNONE-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] -; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: store i64 [[TMP2]], i64* [[ARRAYIDX1]], align 8 +; TFNONE-NEXT: [[TMP16:%.*]] = phi i64 [ [[TMP15]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] +; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: store i64 [[TMP16]], i64* [[ARRAYIDX1]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFALWAYS-LABEL: @test_if_then( ; TFALWAYS-NEXT: entry: +; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFALWAYS-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFALWAYS: vector.ph: +; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFALWAYS-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] +; TFALWAYS: vector.body: +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP9:%.*]] = 
bitcast i64* [[TMP8]] to * +; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0nxv2i64(* [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFALWAYS-NEXT: [[TMP10:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i32 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP11:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP10]], zeroinitializer +; TFALWAYS-NEXT: [[TMP12:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP11]]) +; TFALWAYS-NEXT: [[TMP13:%.*]] = xor [[TMP10]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP14:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP13]], zeroinitializer +; TFALWAYS-NEXT: [[PREDPHI:%.*]] = select [[TMP14]], zeroinitializer, [[TMP12]] +; TFALWAYS-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP16:%.*]] = or [[TMP11]], [[TMP14]] +; TFALWAYS-NEXT: [[TMP17:%.*]] = bitcast i64* [[TMP15]] to * +; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0nxv2i64( [[PREDPHI]], * [[TMP17]], i32 8, [[TMP16]]) +; TFALWAYS-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2 +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP19]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFALWAYS-NEXT: [[TMP20:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP21:%.*]] = extractelement [[TMP20]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFALWAYS: middle.block: +; TFALWAYS-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFALWAYS: scalar.ph: +; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: -; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 -; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[TMP22:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 +; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP22]], 50 ; TFALWAYS-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFALWAYS: if.then: -; TFALWAYS-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR4]] +; TFALWAYS-NEXT: [[TMP23:%.*]] = call i64 @foo(i64 [[TMP22]]) #[[ATTR4]] ; TFALWAYS-NEXT: br label [[IF_END]] ; TFALWAYS: if.end: -; TFALWAYS-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] -; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: store i64 [[TMP2]], i64* [[ARRAYIDX1]], align 8 +; TFALWAYS-NEXT: [[TMP24:%.*]] = phi i64 [ [[TMP23]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] +; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: store i64 [[TMP24]], i64* [[ARRAYIDX1]], align 8 ; TFALWAYS-NEXT: 
[[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; ; TFFALLBACK-LABEL: @test_if_then( ; TFFALLBACK-NEXT: entry: +; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFFALLBACK-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFFALLBACK: vector.ph: +; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFFALLBACK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] +; TFFALLBACK: vector.body: +; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[TMP9:%.*]] = bitcast i64* [[TMP8]] to * +; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0nxv2i64(* [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFFALLBACK-NEXT: [[TMP10:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i32 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP11:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP10]], zeroinitializer +; TFFALLBACK-NEXT: [[TMP12:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP11]]) +; TFFALLBACK-NEXT: [[TMP13:%.*]] = xor [[TMP10]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP14:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP13]], zeroinitializer +; TFFALLBACK-NEXT: [[PREDPHI:%.*]] = select [[TMP14]], zeroinitializer, [[TMP12]] +; TFFALLBACK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[TMP16:%.*]] = or [[TMP11]], [[TMP14]] +; TFFALLBACK-NEXT: [[TMP17:%.*]] = bitcast i64* [[TMP15]] to * +; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0nxv2i64( [[PREDPHI]], * [[TMP17]], i32 8, [[TMP16]]) +; TFFALLBACK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2 +; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP19]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFFALLBACK-NEXT: [[TMP20:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFFALLBACK-NEXT: [[TMP21:%.*]] = extractelement [[TMP20]], i32 0 +; TFFALLBACK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TFFALLBACK: middle.block: +; TFFALLBACK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFFALLBACK: scalar.ph: +; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]] ; TFFALLBACK: for.body: -; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 -; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] +; TFFALLBACK-NEXT: [[TMP22:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 +; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP22]], 50 ; TFFALLBACK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFFALLBACK: if.then: -; TFFALLBACK-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR4]] +; TFFALLBACK-NEXT: [[TMP23:%.*]] = call i64 @foo(i64 [[TMP22]]) #[[ATTR4]] ; TFFALLBACK-NEXT: br label [[IF_END]] ; TFFALLBACK: if.end: -; TFFALLBACK-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] -; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]] -; TFFALLBACK-NEXT: store i64 [[TMP2]], i64* [[ARRAYIDX1]], align 8 +; TFFALLBACK-NEXT: [[TMP24:%.*]] = phi i64 [ [[TMP23]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] +; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[INDVARS_IV]] +; TFFALLBACK-NEXT: store i64 [[TMP24]], i64* [[ARRAYIDX1]], align 8 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret void ; @@ -290,76 +393,195 @@ define void @test_widen_if_then_else(i64* noalias %a, i64* readnone %b) #4 { ; TFNONE-LABEL: @test_widen_if_then_else( ; TFNONE-NEXT: entry: +; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFNONE: vector.ph: +; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 +; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] +; TFNONE: vector.body: +; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFNONE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to * +; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP5]], align 8 +; TFNONE-NEXT: [[TMP6:%.*]] = icmp ugt [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i64 50, i32 0), poison, zeroinitializer) +; TFNONE-NEXT: [[TMP7:%.*]] 
= xor [[TMP6]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFNONE-NEXT: [[TMP8:%.*]] = call @foo_vector( zeroinitializer, [[TMP7]]) +; TFNONE-NEXT: [[TMP9:%.*]] = call @foo_vector( [[WIDE_LOAD]], [[TMP6]]) +; TFNONE-NEXT: [[PREDPHI:%.*]] = select [[TMP7]], [[TMP8]], [[TMP9]] +; TFNONE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]] +; TFNONE-NEXT: [[TMP11:%.*]] = bitcast i64* [[TMP10]] to * +; TFNONE-NEXT: store [[PREDPHI]], * [[TMP11]], align 8 +; TFNONE-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() +; TFNONE-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 2 +; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]] +; TFNONE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; TFNONE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TFNONE: middle.block: +; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFNONE: scalar.ph: +; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: -; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 -; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: [[TMP15:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 +; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP15]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; TFNONE: if.then: -; TFNONE-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR3:[0-9]+]] +; TFNONE-NEXT: [[TMP16:%.*]] = call i64 @foo(i64 [[TMP15]]) #[[ATTR3:[0-9]+]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.else: -; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR3]] +; TFNONE-NEXT: [[TMP17:%.*]] = call i64 @foo(i64 0) #[[ATTR3]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: -; TFNONE-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ] -; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]] -; TFNONE-NEXT: store i64 [[TMP3]], i64* [[ARRAYIDX1]], align 8 +; TFNONE-NEXT: [[TMP18:%.*]] = phi i64 [ [[TMP16]], [[IF_THEN]] ], [ [[TMP17]], [[IF_ELSE]] ] +; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[INDVARS_IV]] +; TFNONE-NEXT: store i64 [[TMP18]], i64* [[ARRAYIDX1]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFALWAYS-LABEL: @test_widen_if_then_else( ; TFALWAYS-NEXT: entry: +; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFALWAYS-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, 
[[TMP1]] +; TFALWAYS-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFALWAYS: vector.ph: +; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFALWAYS-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] +; TFALWAYS: vector.body: +; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP9:%.*]] = bitcast i64* [[TMP8]] to * +; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0nxv2i64(* [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFALWAYS-NEXT: [[TMP10:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i32 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP11:%.*]] = xor [[TMP10]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP11]], zeroinitializer +; TFALWAYS-NEXT: [[TMP13:%.*]] = call @foo_vector( zeroinitializer, [[TMP12]]) +; TFALWAYS-NEXT: [[TMP14:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP10]], zeroinitializer +; TFALWAYS-NEXT: [[TMP15:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP14]]) +; TFALWAYS-NEXT: [[PREDPHI:%.*]] = select [[TMP12]], [[TMP13]], [[TMP15]] +; TFALWAYS-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]] +; TFALWAYS-NEXT: [[TMP17:%.*]] = or [[TMP12]], [[TMP14]] +; TFALWAYS-NEXT: [[TMP18:%.*]] = bitcast i64* [[TMP16]] to * +; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0nxv2i64( [[PREDPHI]], * [[TMP18]], i32 8, [[TMP17]]) +; TFALWAYS-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() +; TFALWAYS-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2 +; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP20]] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024) +; TFALWAYS-NEXT: [[TMP21:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer) +; TFALWAYS-NEXT: [[TMP22:%.*]] = extractelement [[TMP21]], i32 0 +; TFALWAYS-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TFALWAYS: middle.block: +; TFALWAYS-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; TFALWAYS: scalar.ph: +; TFALWAYS-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: -; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 -; 
TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50 +; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: [[TMP23:%.*]] = load i64, i64* [[ARRAYIDX]], align 8 +; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP23]], 50 ; TFALWAYS-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; TFALWAYS: if.then: -; TFALWAYS-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR5:[0-9]+]] +; TFALWAYS-NEXT: [[TMP24:%.*]] = call i64 @foo(i64 [[TMP23]]) #[[ATTR5:[0-9]+]] ; TFALWAYS-NEXT: br label [[IF_END]] ; TFALWAYS: if.else: -; TFALWAYS-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR5]] +; TFALWAYS-NEXT: [[TMP25:%.*]] = call i64 @foo(i64 0) #[[ATTR5]] ; TFALWAYS-NEXT: br label [[IF_END]] ; TFALWAYS: if.end: -; TFALWAYS-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ] -; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]] -; TFALWAYS-NEXT: store i64 [[TMP3]], i64* [[ARRAYIDX1]], align 8 +; TFALWAYS-NEXT: [[TMP26:%.*]] = phi i64 [ [[TMP24]], [[IF_THEN]] ], [ [[TMP25]], [[IF_ELSE]] ] +; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[INDVARS_IV]] +; TFALWAYS-NEXT: store i64 [[TMP26]], i64* [[ARRAYIDX1]], align 8 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; ; TFFALLBACK-LABEL: @test_widen_if_then_else( ; TFFALLBACK-NEXT: entry: +; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 +; TFFALLBACK-NEXT: [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]] +; TFFALLBACK-NEXT: br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; TFFALLBACK: vector.ph: +; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; TFFALLBACK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1 +; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]] +; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] +; TFFALLBACK: vector.body: +; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]] +; TFFALLBACK-NEXT: [[TMP9:%.*]] = bitcast i64* [[TMP8]] to * +; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0nxv2i64(* [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; TFFALLBACK-NEXT: [[TMP10:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 
50, i32 0), poison, zeroinitializer)
+; TFFALLBACK-NEXT:    [[TMP11:%.*]] = xor <vscale x 2 x i1> [[TMP10]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
+; TFFALLBACK-NEXT:    [[TMP12:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> zeroinitializer
+; TFFALLBACK-NEXT:    [[TMP13:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP12]])
+; TFFALLBACK-NEXT:    [[TMP14:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> zeroinitializer
+; TFFALLBACK-NEXT:    [[TMP15:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP14]])
+; TFFALLBACK-NEXT:    [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP12]], <vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[TMP15]]
+; TFFALLBACK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT:    [[TMP17:%.*]] = or <vscale x 2 x i1> [[TMP12]], [[TMP14]]
+; TFFALLBACK-NEXT:    [[TMP18:%.*]] = bitcast i64* [[TMP16]] to <vscale x 2 x i64>*
+; TFFALLBACK-NEXT:    call void @llvm.masked.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> [[PREDPHI]], <vscale x 2 x i64>* [[TMP18]], i32 8, <vscale x 2 x i1> [[TMP17]])
+; TFFALLBACK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT:    [[TMP20:%.*]] = mul i64 [[TMP19]], 2
+; TFFALLBACK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP20]]
+; TFFALLBACK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024)
+; TFFALLBACK-NEXT:    [[TMP21:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
+; TFFALLBACK-NEXT:    [[TMP22:%.*]] = extractelement <vscale x 2 x i1> [[TMP21]], i32 0
+; TFFALLBACK-NEXT:    br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; TFFALLBACK:       middle.block:
+; TFFALLBACK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; TFFALLBACK:       scalar.ph:
+; TFFALLBACK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
 ; TFFALLBACK-NEXT:    br label [[FOR_BODY:%.*]]
 ; TFFALLBACK:       for.body:
-; TFFALLBACK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ]
-; TFFALLBACK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
-; TFFALLBACK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
-; TFFALLBACK-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50
+; TFFALLBACK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; TFFALLBACK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT:    [[TMP23:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; TFFALLBACK-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP23]], 50
 ; TFFALLBACK-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; TFFALLBACK:       if.then:
-; TFFALLBACK-NEXT:    [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR5:[0-9]+]]
+; TFFALLBACK-NEXT:    [[TMP24:%.*]] = call i64 @foo(i64 [[TMP23]]) #[[ATTR5:[0-9]+]]
 ; TFFALLBACK-NEXT:    br label [[IF_END]]
 ; TFFALLBACK:       if.else:
-; TFFALLBACK-NEXT:    [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR5]]
+; TFFALLBACK-NEXT:    [[TMP25:%.*]] = call i64 @foo(i64 0) #[[ATTR5]]
 ; TFFALLBACK-NEXT:    br label [[IF_END]]
 ; TFFALLBACK:       if.end:
-; TFFALLBACK-NEXT:    [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ]
-; TFFALLBACK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
-; TFFALLBACK-NEXT:    store i64 [[TMP3]], i64* [[ARRAYIDX1]], align 8
+; TFFALLBACK-NEXT:    [[TMP26:%.*]] = phi i64 [ [[TMP24]], [[IF_THEN]] ], [ [[TMP25]], [[IF_ELSE]] ]
+; TFFALLBACK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT:    store i64 [[TMP26]], i64* [[ARRAYIDX1]], align 8
 ; TFFALLBACK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; TFFALLBACK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; TFFALLBACK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFFALLBACK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
 ; TFFALLBACK:       for.cond.cleanup:
 ; TFFALLBACK-NEXT:    ret void
 ;
@@ -413,16 +635,16 @@
 ; TFNONE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; TFNONE-NEXT:    [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
 ; TFNONE-NEXT:    [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <vscale x 2 x i64>*
-; TFNONE-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 4
+; TFNONE-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 8
 ; TFNONE-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
 ; TFNONE-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
 ; TFNONE-NEXT:    [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
-; TFNONE-NEXT:    store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 4
+; TFNONE-NEXT:    store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 8
 ; TFNONE-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 2
 ; TFNONE-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
 ; TFNONE-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TFNONE-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; TFNONE-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
 ; TFNONE:       middle.block:
 ; TFNONE-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
 ; TFNONE-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
@@ -432,13 +654,13 @@
 ; TFNONE:       for.body:
 ; TFNONE-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; TFNONE-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
-; TFNONE-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFNONE-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8
 ; TFNONE-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]]
 ; TFNONE-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
-; TFNONE-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFNONE-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 8
 ; TFNONE-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; TFNONE-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; TFNONE-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; TFNONE-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
 ; TFNONE:       for.cond.cleanup:
 ; TFNONE-NEXT:    ret void
 ;
@@ -448,10 +670,10 @@
 ; TFALWAYS:       for.body:
 ; TFALWAYS-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; TFALWAYS-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
-; TFALWAYS-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFALWAYS-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8
 ; TFALWAYS-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]]
 ; TFALWAYS-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
-; TFALWAYS-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFALWAYS-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 8
 ; TFALWAYS-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; TFALWAYS-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
 ; TFALWAYS-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
@@ -474,16 +696,16 @@
 ; TFFALLBACK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; TFFALLBACK-NEXT:    [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
 ; TFFALLBACK-NEXT:    [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <vscale x 2 x i64>*
-; TFFALLBACK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 4
+; TFFALLBACK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 8
 ; TFFALLBACK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
 ; TFFALLBACK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
 ; TFFALLBACK-NEXT:    [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
-; TFFALLBACK-NEXT:    store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 4
+; TFFALLBACK-NEXT:    store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 8
 ; TFFALLBACK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; TFFALLBACK-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 2
 ; TFFALLBACK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
 ; TFFALLBACK-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TFFALLBACK-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; TFFALLBACK-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
 ; TFFALLBACK:       middle.block:
 ; TFFALLBACK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
 ; TFFALLBACK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
@@ -493,13 +715,13 @@
 ; TFFALLBACK:       for.body:
 ; TFFALLBACK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; TFFALLBACK-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
-; TFFALLBACK-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFFALLBACK-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8
 ; TFFALLBACK-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]]
 ; TFFALLBACK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
-; TFFALLBACK-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFFALLBACK-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 8
 ; TFFALLBACK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; TFFALLBACK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; TFFALLBACK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; TFFALLBACK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
 ; TFFALLBACK:       for.cond.cleanup:
 ; TFFALLBACK-NEXT:    ret void
 ;
@@ -541,16 +763,16 @@
 ; TFNONE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; TFNONE-NEXT:    [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
 ; TFNONE-NEXT:    [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <vscale x 2 x i64>*
-; TFNONE-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 4
+; TFNONE-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 8
 ; TFNONE-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
 ; TFNONE-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
 ; TFNONE-NEXT:    [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
-; TFNONE-NEXT:    store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 4
+; TFNONE-NEXT:    store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 8
 ; TFNONE-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 2
 ; TFNONE-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
 ; TFNONE-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TFNONE-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; TFNONE-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
 ; TFNONE:       middle.block:
 ; TFNONE-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
 ; TFNONE-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
@@ -560,123 +782,117 @@
 ; TFNONE:       for.body:
 ; TFNONE-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; TFNONE-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
-; TFNONE-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFNONE-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8
 ; TFNONE-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]]
 ; TFNONE-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
-; TFNONE-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFNONE-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 8
 ; TFNONE-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; TFNONE-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; TFNONE-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TFNONE-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
 ; TFNONE:       for.cond.cleanup:
 ; TFNONE-NEXT:    ret void
 ;
 ; TFALWAYS-LABEL: @test_widen_optmask(
 ; TFALWAYS-NEXT:  entry:
-; TFALWAYS-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFALWAYS-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; TFALWAYS-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; TFALWAYS-NEXT:    [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]]
+; TFALWAYS-NEXT:    br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFALWAYS:       vector.ph:
+; TFALWAYS-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; TFALWAYS-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 2
+; TFALWAYS-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; TFALWAYS-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; TFALWAYS-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP6]], 1
+; TFALWAYS-NEXT:    [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]]
+; TFALWAYS-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; TFALWAYS-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; TFALWAYS-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024)
 ; TFALWAYS-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; TFALWAYS:       vector.body:
-; TFALWAYS-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_CALL_CONTINUE2:%.*]] ]
-; TFALWAYS-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ <i1 true, i1 true>, [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_CALL_CONTINUE2]] ]
-; TFALWAYS-NEXT:    [[TMP0:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
-; TFALWAYS-NEXT:    [[TMP1:%.*]] = bitcast i64* [[TMP0]] to <2 x i64>*
-; TFALWAYS-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* [[TMP1]], i32 4, <2 x i1> [[ACTIVE_LANE_MASK]], <2 x i64> poison)
-; TFALWAYS-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; TFALWAYS-NEXT:    br i1 [[TMP2]], label [[PRED_CALL_IF:%.*]], label [[PRED_CALL_CONTINUE:%.*]]
-; TFALWAYS:       pred.call.if:
-; TFALWAYS-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 0
-; TFALWAYS-NEXT:    [[TMP4:%.*]] = call i64 @foo(i64 [[TMP3]]) #[[ATTR7:[0-9]+]]
-; TFALWAYS-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i32 0
-; TFALWAYS-NEXT:    br label [[PRED_CALL_CONTINUE]]
-; TFALWAYS:       pred.call.continue:
-; TFALWAYS-NEXT:    [[TMP6:%.*]] = phi <2 x i64> [ poison, [[VECTOR_BODY]] ], [ [[TMP5]], [[PRED_CALL_IF]] ]
-; TFALWAYS-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; TFALWAYS-NEXT:    br i1 [[TMP7]], label [[PRED_CALL_IF1:%.*]], label [[PRED_CALL_CONTINUE2]]
-; TFALWAYS:       pred.call.if1:
-; TFALWAYS-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 1
-; TFALWAYS-NEXT:    [[TMP9:%.*]] = call i64 @foo(i64 [[TMP8]]) #[[ATTR7]]
-; TFALWAYS-NEXT:    [[TMP10:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[TMP9]], i32 1
-; TFALWAYS-NEXT:    br label [[PRED_CALL_CONTINUE2]]
-; TFALWAYS:       pred.call.continue2:
-; TFALWAYS-NEXT:    [[TMP11:%.*]] = phi <2 x i64> [ [[TMP6]], [[PRED_CALL_CONTINUE]] ], [ [[TMP10]], [[PRED_CALL_IF1]] ]
-; TFALWAYS-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
-; TFALWAYS-NEXT:    [[TMP13:%.*]] = bitcast i64* [[TMP12]] to <2 x i64>*
-; TFALWAYS-NEXT:    call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> [[TMP11]], <2 x i64>* [[TMP13]], i32 4, <2 x i1> [[ACTIVE_LANE_MASK]])
-; TFALWAYS-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
-; TFALWAYS-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX_NEXT]], i64 1024)
-; TFALWAYS-NEXT:    [[TMP14:%.*]] = xor <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], <i1 true, i1 true>
-; TFALWAYS-NEXT:    [[TMP15:%.*]] = extractelement <2 x i1> [[TMP14]], i32 0
-; TFALWAYS-NEXT:    br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; TFALWAYS-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFALWAYS-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFALWAYS-NEXT:    [[TMP8:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFALWAYS-NEXT:    [[TMP9:%.*]] = bitcast i64* [[TMP8]] to <vscale x 2 x i64>*
+; TFALWAYS-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
+; TFALWAYS-NEXT:    [[TMP10:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; TFALWAYS-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFALWAYS-NEXT:    [[TMP12:%.*]] = bitcast i64* [[TMP11]] to <vscale x 2 x i64>*
+; TFALWAYS-NEXT:    call void @llvm.masked.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64>* [[TMP12]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; TFALWAYS-NEXT:    [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; TFALWAYS-NEXT:    [[TMP14:%.*]] = mul i64 [[TMP13]], 2
+; TFALWAYS-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP14]]
+; TFALWAYS-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024)
+; TFALWAYS-NEXT:    [[TMP15:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
+; TFALWAYS-NEXT:    [[TMP16:%.*]] = extractelement <vscale x 2 x i1> [[TMP15]], i32 0
+; TFALWAYS-NEXT:    br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
 ; TFALWAYS:       middle.block:
 ; TFALWAYS-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
 ; TFALWAYS:       scalar.ph:
-; TFALWAYS-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFALWAYS-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
 ; TFALWAYS-NEXT:    br label [[FOR_BODY:%.*]]
 ; TFALWAYS:       for.body:
 ; TFALWAYS-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; TFALWAYS-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
-; TFALWAYS-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
-; TFALWAYS-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7]]
+; TFALWAYS-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+-; TFALWAYS-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7]]
+; TFALWAYS-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8
+; TFALWAYS-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7:[0-9]+]]
 ; TFALWAYS-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
-; TFALWAYS-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFALWAYS-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 8
 ; TFALWAYS-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; TFALWAYS-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; TFALWAYS-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; TFALWAYS-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
 ; TFALWAYS:       for.cond.cleanup:
 ; TFALWAYS-NEXT:    ret void
 ;
 ; TFFALLBACK-LABEL: @test_widen_optmask(
 ; TFFALLBACK-NEXT:  entry:
-; TFFALLBACK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFFALLBACK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; TFFALLBACK-NEXT:    [[TMP2:%.*]] = icmp ult i64 -1025, [[TMP1]]
+; TFFALLBACK-NEXT:    br i1 [[TMP2]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFFALLBACK:       vector.ph:
+; TFFALLBACK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 2
+; TFFALLBACK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; TFFALLBACK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP6]], 1
+; TFFALLBACK-NEXT:    [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]]
+; TFFALLBACK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; TFFALLBACK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; TFFALLBACK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024)
 ; TFFALLBACK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; TFFALLBACK:       vector.body:
-; TFFALLBACK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_CALL_CONTINUE2:%.*]] ]
-; TFFALLBACK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ <i1 true, i1 true>, [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_CALL_CONTINUE2]] ]
-; TFFALLBACK-NEXT:    [[TMP0:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
-; TFFALLBACK-NEXT:    [[TMP1:%.*]] = bitcast i64* [[TMP0]] to <2 x i64>*
-; TFFALLBACK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* [[TMP1]], i32 4, <2 x i1> [[ACTIVE_LANE_MASK]], <2 x i64> poison)
-; TFFALLBACK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; TFFALLBACK-NEXT:    br i1 [[TMP2]], label [[PRED_CALL_IF:%.*]], label [[PRED_CALL_CONTINUE:%.*]]
-; TFFALLBACK:       pred.call.if:
-; TFFALLBACK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 0
-; TFFALLBACK-NEXT:    [[TMP4:%.*]] = call i64 @foo(i64 [[TMP3]]) #[[ATTR7:[0-9]+]]
-; TFFALLBACK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i32 0
-; TFFALLBACK-NEXT:    br label [[PRED_CALL_CONTINUE]]
-; TFFALLBACK:       pred.call.continue:
-; TFFALLBACK-NEXT:    [[TMP6:%.*]] = phi <2 x i64> [ poison, [[VECTOR_BODY]] ], [ [[TMP5]], [[PRED_CALL_IF]] ]
-; TFFALLBACK-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; TFFALLBACK-NEXT:    br i1 [[TMP7]], label [[PRED_CALL_IF1:%.*]], label [[PRED_CALL_CONTINUE2]]
-; TFFALLBACK:       pred.call.if1:
-; TFFALLBACK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[WIDE_MASKED_LOAD]], i32 1
-; TFFALLBACK-NEXT:    [[TMP9:%.*]] = call i64 @foo(i64 [[TMP8]]) #[[ATTR7]]
-; TFFALLBACK-NEXT:    [[TMP10:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[TMP9]], i32 1
-; TFFALLBACK-NEXT:    br label [[PRED_CALL_CONTINUE2]]
-; TFFALLBACK:       pred.call.continue2:
-; TFFALLBACK-NEXT:    [[TMP11:%.*]] = phi <2 x i64> [ [[TMP6]], [[PRED_CALL_CONTINUE]] ], [ [[TMP10]], [[PRED_CALL_IF1]] ]
-; TFFALLBACK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
-; TFFALLBACK-NEXT:    [[TMP13:%.*]] = bitcast i64* [[TMP12]] to <2 x i64>*
-; TFFALLBACK-NEXT:    call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> [[TMP11]], <2 x i64>* [[TMP13]], i32 4, <2 x i1> [[ACTIVE_LANE_MASK]])
-; TFFALLBACK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
-; TFFALLBACK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX_NEXT]], i64 1024)
-; TFFALLBACK-NEXT:    [[TMP14:%.*]] = xor <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], <i1 true, i1 true>
-; TFFALLBACK-NEXT:    [[TMP15:%.*]] = extractelement <2 x i1> [[TMP14]], i32 0
-; TFFALLBACK-NEXT:    br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; TFFALLBACK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT:    [[TMP8:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT:    [[TMP9:%.*]] = bitcast i64* [[TMP8]] to <vscale x 2 x i64>*
+; TFFALLBACK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
+; TFFALLBACK-NEXT:    [[TMP10:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; TFFALLBACK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT:    [[TMP12:%.*]] = bitcast i64* [[TMP11]] to <vscale x 2 x i64>*
+; TFFALLBACK-NEXT:    call void @llvm.masked.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64>* [[TMP12]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; TFFALLBACK-NEXT:    [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT:    [[TMP14:%.*]] = mul i64 [[TMP13]], 2
+; TFFALLBACK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP14]]
+; TFFALLBACK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1024)
+; TFFALLBACK-NEXT:    [[TMP15:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
+; TFFALLBACK-NEXT:    [[TMP16:%.*]] = extractelement <vscale x 2 x i1> [[TMP15]], i32 0
+; TFFALLBACK-NEXT:    br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
 ; TFFALLBACK:       middle.block:
 ; TFFALLBACK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
 ; TFFALLBACK:       scalar.ph:
-; TFFALLBACK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFFALLBACK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
 ; TFFALLBACK-NEXT:    br label [[FOR_BODY:%.*]]
 ; TFFALLBACK:       for.body:
 ; TFFALLBACK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; TFFALLBACK-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
-; TFFALLBACK-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
-; TFFALLBACK-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7]]
+; TFFALLBACK-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+-; TFFALLBACK-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7]]
+; TFFALLBACK-NEXT:    [[LOAD:%.*]] = load i64, i64* [[GEP]], align 8
+; TFFALLBACK-NEXT:    [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR7:[0-9]+]]
 ; TFFALLBACK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
-; TFFALLBACK-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFFALLBACK-NEXT:    store i64 [[CALL]], i64* [[ARRAYIDX]], align 8
 ; TFFALLBACK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; TFFALLBACK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
-; TFFALLBACK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TFFALLBACK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
 ; TFFALLBACK:       for.cond.cleanup:
 ; TFFALLBACK-NEXT:    ret void
 ;