diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -564,6 +564,16 @@
     return replaceInstUsesWith(I, NewPow);
   }
 
+  // powi(x, y) * powi(x, z) -> powi(x, y + z)
+  if (match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
+      match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
+      Y->getType() == Z->getType()) {
+    auto *YZ = Builder.CreateAdd(Y, Z);
+    auto *NewPow = Builder.CreateIntrinsic(
+        Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
+    return replaceInstUsesWith(I, NewPow);
+  }
+
   // exp(X) * exp(Y) -> exp(X + Y)
   if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
       match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
diff --git a/llvm/test/Transforms/InstCombine/powi.ll b/llvm/test/Transforms/InstCombine/powi.ll
--- a/llvm/test/Transforms/InstCombine/powi.ll
+++ b/llvm/test/Transforms/InstCombine/powi.ll
@@ -143,10 +143,9 @@
 define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @powi_fmul_powi(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Z:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[TMP0]])
+; CHECK-NEXT:    ret double [[TMP1]]
 ;
 entry:
   %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
@@ -158,10 +157,9 @@
 define double @powi_fmul_powi_fast_on_fmul(double %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @powi_fmul_powi_fast_on_fmul(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[P2]], [[P1]]
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Z:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[TMP0]])
+; CHECK-NEXT:    ret double [[TMP1]]
 ;
 entry:
   %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
@@ -188,10 +186,9 @@
 define double @powi_fmul_powi_same_power(double %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @powi_fmul_powi_same_power(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Y]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i32 [[Y:%.*]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[TMP0]])
+; CHECK-NEXT:    ret double [[TMP1]]
 ;
 entry:
   %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
@@ -205,15 +202,15 @@
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
 ; CHECK-NEXT:    tail call void @use(double [[P1]])
-; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Y]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
+; CHECK-NEXT:    ret double [[TMP1]]
 ;
 entry:
   %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
   tail call void @use(double %p1)
   %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
-  %mul = fmul reassoc double %p2, %p1
+  %mul = fmul reassoc double %p1, %p2
   ret double %mul
 }
 
@@ -222,9 +219,9 @@
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Z:%.*]])
 ; CHECK-NEXT:    tail call void @use(double [[P1]])
-; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Y:%.*]], [[Z]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
+; CHECK-NEXT:    ret double [[TMP1]]
 ;
 entry:
   %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)