Index: llvm/lib/Analysis/ScalarEvolution.cpp
===================================================================
--- llvm/lib/Analysis/ScalarEvolution.cpp
+++ llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1778,6 +1778,18 @@
     }
   }
 
+  if (auto *SA = dyn_cast<SCEVMulExpr>(Op)) {
+    // zext((A * B * ...)) --> (zext(A) * zext(B) * ...)
+    if (SA->hasNoUnsignedWrap()) {
+      // If the multiply does not wrap in the unsigned sense (NUW), then we
+      // can, by definition, commute the zero extension with the multiply.
+      SmallVector<const SCEV *, 4> Ops;
+      for (const auto *Op : SA->operands())
+        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
+      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
+    }
+  }
+
   // The cast wasn't folded; create an explicit cast node.
   // Recompute the insert position, as it may have been invalidated.
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
Index: llvm/test/Analysis/ScalarEvolution/zext-mul.ll
===================================================================
--- /dev/null
+++ llvm/test/Analysis/ScalarEvolution/zext-mul.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
+
+; Check that we convert
+;   zext((a * b))
+; to
+;   (zext(a) * zext(b))
+
+declare i32 @get_int()
+
+; Transform doesn't apply here, because %a lacks range metadata.
+; CHECK-LABEL: @no_range
+define void @no_range() {
+  %a = call i32 @get_int()
+  %b = mul i32 %a, 4
+  %c = zext i32 %b to i64
+  ; CHECK: %c
+  ; CHECK-NEXT: --> (zext i32 (4 * %a) to i64)
+  ret void
+}
+
+; CHECK-LABEL: @range
+define void @range() {
+  %a = call i32 @get_int(), !range !0
+  %b = mul i32 %a, 4
+  %c = zext i32 %b to i64
+  ; CHECK: %c
+  ; CHECK-NEXT: --> (4 * (zext i32 %a to i64))
+  ret void
+}
+
+!0 = !{i32 0, i32 100}
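
Note for reviewers: the hasNoUnsignedWrap() guard is what makes the fold sound, since zero extension only commutes with a multiply that cannot wrap in the narrow type. Below is a minimal standalone C++ sketch (not part of the patch; values and names are illustrative) showing the counterexample when the narrow multiply does wrap unsigned:

    // Demonstrates why the fold may only distribute zext over a mul that is
    // known NUW: if the narrow multiply wraps, zext(A * B) != zext(A) * zext(B).
    // Standalone illustration only, not taken from the patch.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t A = 200, B = 2;                 // 200 * 2 = 400 wraps in 8 bits
      uint8_t NarrowMul = uint8_t(A * B);     // 400 mod 256 = 144
      uint64_t ZextOfMul = NarrowMul;         // zext(A * B)       == 144
      uint64_t MulOfZext = uint64_t(A) * B;   // zext(A) * zext(B) == 400
      std::printf("zext(mul) = %llu, mul(zext) = %llu\n",
                  (unsigned long long)ZextOfMul, (unsigned long long)MulOfZext);
      return 0;
    }

In the new test, the !range !0 metadata ([0, 100)) is what lets SCEV infer that 4 * %a is NUW in the @range function, so the two CHECK lines exercise both the guarded and unguarded paths.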