Index: lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -728,6 +728,35 @@
     }
   }
 
+  if (AllowReassociate) {
+    Value *Opnd0 = nullptr;
+    Value *Opnd1 = nullptr;
+    if (Op0->hasOneUse() && Op1->hasOneUse()) {
+      BuilderTy::FastMathFlagGuard Guard(Builder);
+      Builder.setFastMathFlags(I.getFastMathFlags());
+
+      // exp(a) * exp(b) -> exp(a + b)
+      if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(Opnd0))) &&
+          match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Opnd1)))) {
+        Value *FAddVal = Builder.CreateFAdd(Opnd0, Opnd1);
+        Value *Exp =
+            Intrinsic::getDeclaration(I.getModule(), Intrinsic::exp, I.getType());
+        Value *ExpCall = Builder.CreateCall(Exp, FAddVal);
+        return replaceInstUsesWith(I, ExpCall);
+      }
+
+      // exp2(a) * exp2(b) -> exp2(a + b)
+      if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(Opnd0))) &&
+          match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Opnd1)))) {
+        Value *FAddVal = Builder.CreateFAdd(Opnd0, Opnd1);
+        Value *Exp =
+            Intrinsic::getDeclaration(I.getModule(), Intrinsic::exp2, I.getType());
+        Value *ExpCall = Builder.CreateCall(Exp, FAddVal);
+        return replaceInstUsesWith(I, ExpCall);
+      }
+    }
+  }
+
   // Handle symmetric situation in a 2-iteration loop
   Value *Opnd0 = Op0;
   Value *Opnd1 = Op1;
Index: test/Transforms/InstCombine/fmul-exp.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/fmul-exp.ll
@@ -0,0 +1,67 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare double @llvm.exp.f64(double) nounwind readnone speculatable
+declare void @use(double)
+
+; exp(a) * exp(b) no math flags
+define double @exp_a_exp_b(double %a, double %b) {
+; CHECK-LABEL: @exp_a_exp_b(
+; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.exp.f64(double [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.exp.f64(double [[B:%.*]])
+; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret double [[MUL]]
+;
+  %1 = call double @llvm.exp.f64(double %a)
+  %2 = call double @llvm.exp.f64(double %b)
+  %mul = fmul double %1, %2
+  ret double %mul
+}
+
+; exp(a) * exp(b) fast-math, multiple uses
+define double @exp_a_exp_b_multiple_uses(double %a, double %b) {
+; CHECK-LABEL: @exp_a_exp_b_multiple_uses(
+; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.exp.f64(double [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.exp.f64(double [[B:%.*]])
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    call void @use(double [[TMP2]])
+; CHECK-NEXT:    ret double [[MUL]]
+;
+  %1 = call fast double @llvm.exp.f64(double %a)
+  %2 = call fast double @llvm.exp.f64(double %b)
+  %mul = fmul fast double %1, %2
+  call void @use(double %2)
+  ret double %mul
+}
+
+; exp(a) * exp(b) => exp(a+b) with fast-math
+define double @exp_a_exp_b_fast(double %a, double %b) {
+; CHECK-LABEL: @exp_a_exp_b_fast(
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast double [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.exp.f64(double [[TMP1]])
+; CHECK-NEXT:    ret double [[TMP2]]
+;
+  %1 = call fast double @llvm.exp.f64(double %a)
+  %2 = call fast double @llvm.exp.f64(double %b)
+  %mul = fmul fast double %1, %2
+  ret double %mul
+}
+
+; exp(a) * exp(b) * exp(c) * exp(d) => exp(a+b+c+d) with fast-math
+define double @exp_a_exp_b_exp_c_exp_d_fast(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: @exp_a_exp_b_exp_c_exp_d_fast(
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast double [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd fast double [[TMP1]], [[C:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd fast double [[TMP2]], [[D:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast double @llvm.exp.f64(double [[TMP3]])
+; CHECK-NEXT:    ret double [[TMP4]]
+;
+  %1 = call fast double @llvm.exp.f64(double %a)
+  %2 = call fast double @llvm.exp.f64(double %b)
+  %mul = fmul fast double %1, %2
+  %3 = call fast double @llvm.exp.f64(double %c)
+  %mul1 = fmul fast double %mul, %3
+  %4 = call fast double @llvm.exp.f64(double %d)
+  %mul2 = fmul fast double %mul1, %4
+  ret double %mul2
+}
Index: test/Transforms/InstCombine/fmul-exp2.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/fmul-exp2.ll
@@ -0,0 +1,67 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare double @llvm.exp2.f64(double) nounwind readnone speculatable
+declare void @use(double)
+
+; exp2(a) * exp2(b) no math flags
+define double @exp2_a_exp2_b(double %a, double %b) {
+; CHECK-LABEL: @exp2_a_exp2_b(
+; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.exp2.f64(double [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]])
+; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret double [[MUL]]
+;
+  %1 = call double @llvm.exp2.f64(double %a)
+  %2 = call double @llvm.exp2.f64(double %b)
+  %mul = fmul double %1, %2
+  ret double %mul
+}
+
+; exp2(a) * exp2(b) fast-math, multiple uses
+define double @exp2_a_exp2_b_multiple_uses(double %a, double %b) {
+; CHECK-LABEL: @exp2_a_exp2_b_multiple_uses(
+; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.exp2.f64(double [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.exp2.f64(double [[B:%.*]])
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    call void @use(double [[TMP2]])
+; CHECK-NEXT:    ret double [[MUL]]
+;
+  %1 = call fast double @llvm.exp2.f64(double %a)
+  %2 = call fast double @llvm.exp2.f64(double %b)
+  %mul = fmul fast double %1, %2
+  call void @use(double %2)
+  ret double %mul
+}
+
+; exp2(a) * exp2(b) => exp2(a+b) with fast-math
+define double @exp2_a_exp2_b_fast(double %a, double %b) {
+; CHECK-LABEL: @exp2_a_exp2_b_fast(
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast double [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.exp2.f64(double [[TMP1]])
+; CHECK-NEXT:    ret double [[TMP2]]
+;
+  %1 = call fast double @llvm.exp2.f64(double %a)
+  %2 = call fast double @llvm.exp2.f64(double %b)
+  %mul = fmul fast double %1, %2
+  ret double %mul
+}
+
+; exp2(a) * exp2(b) * exp2(c) * exp2(d) => exp2(a+b+c+d) with fast-math
+define double @exp2_a_exp2_b_exp2_c_exp2_d(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: @exp2_a_exp2_b_exp2_c_exp2_d(
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast double [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd fast double [[TMP1]], [[C:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd fast double [[TMP2]], [[D:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast double @llvm.exp2.f64(double [[TMP3]])
+; CHECK-NEXT:    ret double [[TMP4]]
+;
+  %1 = call fast double @llvm.exp2.f64(double %a)
+  %2 = call fast double @llvm.exp2.f64(double %b)
+  %mul = fmul fast double %1, %2
+  %3 = call fast double @llvm.exp2.f64(double %c)
+  %mul1 = fmul fast double %mul, %3
+  %4 = call fast double @llvm.exp2.f64(double %d)
+  %mul2 = fmul fast double %mul1, %4
+  ret double %mul2
+}