Index: llvm/lib/Analysis/IVDescriptors.cpp
===================================================================
--- llvm/lib/Analysis/IVDescriptors.cpp
+++ llvm/lib/Analysis/IVDescriptors.cpp
@@ -1428,10 +1428,11 @@
   ConstantInt *CV = ConstStep->getValue();
 
   const DataLayout &DL = Phi->getModule()->getDataLayout();
-  int64_t Size = static_cast<int64_t>(DL.getTypeAllocSize(ElementType));
-  if (!Size)
+  TypeSize TySize = DL.getTypeAllocSize(ElementType);
+  // Bail out on scalable types: their allocation size is a runtime multiple
+  // of vscale, so a fixed element stride cannot be computed here.
+  if (TySize.isZero() || TySize.isScalable())
     return false;
 
+  int64_t Size = static_cast<int64_t>(TySize.getFixedSize());
   int64_t CVSize = CV->getSExtValue();
   if (CVSize % Size)
     return false;
Index: llvm/test/Transforms/CanonicalizeFreezeInLoops/phis.ll
===================================================================
--- llvm/test/Transforms/CanonicalizeFreezeInLoops/phis.ll
+++ llvm/test/Transforms/CanonicalizeFreezeInLoops/phis.ll
@@ -112,3 +112,37 @@
 exit:
   ret void
 }
+
+; Negative test - ensure we handle scalable vector types correctly
+define void @no_freeze_scalable(<vscale x 4 x i32>* %ptr) {
+; CHECK-LABEL: @no_freeze_scalable(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi <vscale x 4 x i32>* [ [[TMP0:%.*]], [[FOR_BODY]] ], [ [[PTR:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[J_0117:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV12:%.*]] = bitcast <vscale x 4 x i32>* [[LSR_IV1]] to i8*
+; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[J_0117]], 1
+; CHECK-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[LSR_IV12]], i64 4
+; CHECK-NEXT:    [[TMP0]] = bitcast i8* [[UGLYGEP]] to <vscale x 4 x i32>*
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[INC]], 1024
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[END:%.*]]
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %lsr.iv1 = phi <vscale x 4 x i32>* [ %0, %for.body ], [ %ptr, %entry ]
+  %j.0117 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %lsr.iv12 = bitcast <vscale x 4 x i32>* %lsr.iv1 to i8*
+  %inc = add nuw nsw i64 %j.0117, 1
+  %uglygep = getelementptr i8, i8* %lsr.iv12, i64 4
+  %0 = bitcast i8* %uglygep to <vscale x 4 x i32>*
+  %cmp = icmp ne i64 %inc, 1024
+  br i1 %cmp, label %for.body, label %end
+
+end:                                              ; preds = %for.body
+  ret void
+}
Index: llvm/unittests/Analysis/IVDescriptorsTest.cpp
===================================================================
--- llvm/unittests/Analysis/IVDescriptorsTest.cpp
+++ llvm/unittests/Analysis/IVDescriptorsTest.cpp
@@ -97,6 +97,49 @@
   });
 }
 
+TEST(IVDescriptorsTest, LoopWithScalableTypes) {
+  // Parse the module.
+  LLVMContext Context;
+
+  std::unique_ptr<Module> M = parseIR(
+      Context,
+      R"(define void @foo(<vscale x 4 x i32>* %ptr) {
+entry:
+  br label %for.body
+
+for.body:
+  %lsr.iv1 = phi <vscale x 4 x i32>* [ %0, %for.body ], [ %ptr, %entry ]
+  %j.0117 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %lsr.iv12 = bitcast <vscale x 4 x i32>* %lsr.iv1 to i8*
+  %inc = add nuw nsw i64 %j.0117, 1
+  %uglygep = getelementptr i8, i8* %lsr.iv12, i64 4
+  %0 = bitcast i8* %uglygep to <vscale x 4 x i32>*
+  %cmp = icmp ne i64 %inc, 1024
+  br i1 %cmp, label %for.body, label %end
+
+end:
+  ret void
+})"
+      );
+
+  runWithLoopInfoAndSE(
+      *M, "foo", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+        Function::iterator FI = F.begin();
+        // First basic block is entry - skip it.
+        BasicBlock *Header = &*(++FI);
+        assert(Header->getName() == "for.body");
+        Loop *L = LI.getLoopFor(Header);
+        EXPECT_NE(L, nullptr);
+        PHINode *Inst_iv = dyn_cast<PHINode>(&Header->front());
+        assert(Inst_iv->getName() == "lsr.iv1");
+        InductionDescriptor IndDesc;
+        bool IsInductionPHI =
+            InductionDescriptor::isInductionPHI(Inst_iv, L, &SE, IndDesc);
+        EXPECT_FALSE(IsInductionPHI);
+      });
+}
+
+
 // Depending on how SCEV deals with ptrtoint cast, the step of a phi could be
 // a pointer, and InductionDescriptor used to fail with an assertion.
 // So just check that it doesn't assert.