Index: lib/Transforms/Utils/SimplifyLibCalls.cpp
===================================================================
--- lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1189,19 +1189,11 @@
 
 Value *LibCallSimplifier::optimizeFabs(CallInst *CI, IRBuilder<> &B) {
   Function *Callee = CI->getCalledFunction();
-  Value *Ret = nullptr;
   StringRef Name = Callee->getName();
   if (Name == "fabs" && hasFloatVersion(Name))
-    Ret = optimizeUnaryDoubleFP(CI, B, false);
+    return optimizeUnaryDoubleFP(CI, B, false);
 
-  Value *Op = CI->getArgOperand(0);
-  if (Instruction *I = dyn_cast<Instruction>(Op)) {
-    // Fold fabs(x * x) -> x * x; any squared FP value must already be positive.
-    if (I->getOpcode() == Instruction::FMul)
-      if (I->getOperand(0) == I->getOperand(1))
-        return Op;
-  }
-  return Ret;
+  return nullptr;
 }
 
 Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
Index: test/Transforms/InstCombine/fabs.ll
===================================================================
--- test/Transforms/InstCombine/fabs.ll
+++ test/Transforms/InstCombine/fabs.ll
@@ -13,7 +13,8 @@
 
 ; CHECK-LABEL: square_fabs_call_f32(
 ; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: ret float %mul
+; CHECK-NEXT: %fabsf = tail call float @fabsf(float %mul)
+; CHECK-NEXT: ret float %fabsf
 }
 
 define double @square_fabs_call_f64(double %x) {
@@ -23,7 +24,8 @@
 
 ; CHECK-LABEL: square_fabs_call_f64(
 ; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = tail call double @fabs(double %mul)
+; CHECK-NEXT: ret double %fabs
 }
 
 define fp128 @square_fabs_call_f128(fp128 %x) {
@@ -33,7 +35,8 @@
 
 ; CHECK-LABEL: square_fabs_call_f128(
 ; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: ret fp128 %mul
+; CHECK-NEXT: %fabsl = tail call fp128 @fabsl(fp128 %mul)
+; CHECK-NEXT: ret fp128 %fabsl
 }
 
 ; Make sure all intrinsic calls are eliminated when the input is known positive.
@@ -49,7 +52,8 @@
 
 ; CHECK-LABEL: square_fabs_intrinsic_f32(
 ; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: ret float %mul
+; CHECK-NEXT: %fabsf = tail call float @llvm.fabs.f32(float %mul)
+; CHECK-NEXT: ret float %fabsf
 }
 
 define double @square_fabs_intrinsic_f64(double %x) {
@@ -59,7 +63,8 @@
 
 ; CHECK-LABEL: square_fabs_intrinsic_f64(
 ; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = tail call double @llvm.fabs.f64(double %mul)
+; CHECK-NEXT: ret double %fabs
 }
 
 define fp128 @square_fabs_intrinsic_f128(fp128 %x) {
@@ -69,7 +74,8 @@
 
 ; CHECK-LABEL: square_fabs_intrinsic_f128(
 ; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: ret fp128 %mul
+; CHECK-NEXT: %fabsl = tail call fp128 @llvm.fabs.f128(fp128 %mul)
+; CHECK-NEXT: ret fp128 %fabsl
 }
 
 ; Shrinking a library call to a smaller type should not be inhibited by nor inhibit the square optimization.
@@ -82,7 +88,10 @@
   ret float %trunc
 
 ; CHECK-LABEL: square_fabs_shrink_call1(
-; CHECK-NEXT: %trunc = fmul float %x, %x
+; CHECK-NEXT: %ext = fpext float %x to double
+; CHECK-NEXT: %sq = fmul double %ext, %ext
+; CHECK-NEXT: call double @fabs(double %sq)
+; CHECK-NEXT: %trunc = fptrunc double %fabs to float
 ; CHECK-NEXT: ret float %trunc
 }
 
@@ -95,7 +104,8 @@
 
 ; CHECK-LABEL: square_fabs_shrink_call2(
 ; CHECK-NEXT: %sq = fmul float %x, %x
-; CHECK-NEXT: ret float %sq
+; CHECK-NEXT: %fabsf = call float @fabsf(float %sq)
+; CHECK-NEXT: ret float %fabsf
 }
 
 ; CHECK-LABEL: @fabs_select_constant_negative_positive(
Index: test/Transforms/InstCombine/fast-math.ll
===================================================================
--- test/Transforms/InstCombine/fast-math.ll
+++ test/Transforms/InstCombine/fast-math.ll
@@ -672,7 +672,8 @@
 
 ; CHECK-LABEL: sqrt_intrinsic_arg_4th(
 ; CHECK-NEXT: %mul = fmul fast double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = call fast double @llvm.fabs.f64(double %mul)
+; CHECK-NEXT: ret double %fabs
 }
 
 define double @sqrt_intrinsic_arg_5th(double %x) {
@@ -684,8 +685,9 @@
 
 ; CHECK-LABEL: sqrt_intrinsic_arg_5th(
 ; CHECK-NEXT: %mul = fmul fast double %x, %x
+; CHECK-NEXT: %fabs = call fast double @llvm.fabs.f64(double %mul)
 ; CHECK-NEXT: %sqrt1 = call fast double @llvm.sqrt.f64(double %x)
-; CHECK-NEXT: %1 = fmul fast double %mul, %sqrt1
+; CHECK-NEXT: %1 = fmul fast double %fabs, %sqrt1
 ; CHECK-NEXT: ret double %1
 }
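Note on the behavioral change (illustrative, not part of the patch): with the fabs(x * x) -> x * x fold removed from LibCallSimplifier::optimizeFabs, the library-call simplifier no longer erases a fabs/fabsf call just because its operand is a square, which is why every updated CHECK block above now expects the call to survive. A minimal standalone sketch of that pattern, assuming the usual opt -instcombine -S | FileCheck RUN line used by these test files; the function name @square_then_fabs is invented for illustration:

declare float @fabsf(float)

define float @square_then_fabs(float %x) {
  ; %mul is x * x; before this patch optimizeFabs replaced the call below
  ; with %mul, after it the tail call to @fabsf remains in the output IR.
  %mul = fmul float %x, %x
  %fabs = tail call float @fabsf(float %mul)
  ret float %fabs
}

The same applies to the @llvm.fabs.* intrinsic variants and to the fpext/fmul/fptrunc shrink cases checked above.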