diff --git a/llvm/include/llvm/Analysis/ScalarFuncs.def b/llvm/include/llvm/Analysis/ScalarFuncs.def
new file mode 100644
--- /dev/null
+++ b/llvm/include/llvm/Analysis/ScalarFuncs.def
@@ -0,0 +1,117 @@
+//===-- ScalarFuncs.def - Library information ----------*- C++ -*----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// This .def file creates a mapping from standard IEEE math functions to
+// their corresponding entries in the IBM MASS (scalar) library.
+// LLVM intrinsic math functions will be handled in PPCISelLowering to
+// allow existing optimizations like pow(x,0.5) --> sqrt(x).
+
+#if defined(TLI_DEFINE_SCALAR_MASS_FUNCS)
+#define TLI_DEFINE_SCALAR_MASS_FUNC(SCAL, MASSENTRY) {SCAL, MASSENTRY},
+#endif
+
+TLI_DEFINE_SCALAR_MASS_FUNC("acosf", "__xl_acosf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__acosf_finite", "__xl_acosf")
+TLI_DEFINE_SCALAR_MASS_FUNC("acos", "__xl_acos")
+TLI_DEFINE_SCALAR_MASS_FUNC("__acos_finite", "__xl_acos")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("acoshf", "__xl_acoshf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__acoshf_finite", "__xl_acoshf")
+TLI_DEFINE_SCALAR_MASS_FUNC("acosh", "__xl_acosh")
+TLI_DEFINE_SCALAR_MASS_FUNC("__acosh_finite", "__xl_acosh")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("asinf", "__xl_asinf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__asinf_finite", "__xl_asinf")
+TLI_DEFINE_SCALAR_MASS_FUNC("asin", "__xl_asin")
+TLI_DEFINE_SCALAR_MASS_FUNC("__asin_finite", "__xl_asin")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("asinhf", "__xl_asinhf")
+TLI_DEFINE_SCALAR_MASS_FUNC("asinh", "__xl_asinh")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("atanf", "__xl_atanf")
+TLI_DEFINE_SCALAR_MASS_FUNC("atan", "__xl_atan")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("atan2f", "__xl_atan2f")
+TLI_DEFINE_SCALAR_MASS_FUNC("__atan2f_finite", "__xl_atan2f")
+TLI_DEFINE_SCALAR_MASS_FUNC("atan2", "__xl_atan2")
+TLI_DEFINE_SCALAR_MASS_FUNC("__atan2_finite", "__xl_atan2")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("atanhf", "__xl_atanhf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__atanhf_finite", "__xl_atanhf")
+TLI_DEFINE_SCALAR_MASS_FUNC("atanh", "__xl_atanh")
+TLI_DEFINE_SCALAR_MASS_FUNC("__atanh_finite", "__xl_atanh")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("cbrtf", "__xl_cbrtf")
+TLI_DEFINE_SCALAR_MASS_FUNC("cbrt", "__xl_cbrt")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("cosf", "__xl_cosf")
+TLI_DEFINE_SCALAR_MASS_FUNC("cos", "__xl_cos")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("coshf", "__xl_coshf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__coshf_finite", "__xl_coshf")
+TLI_DEFINE_SCALAR_MASS_FUNC("cosh", "__xl_cosh")
+TLI_DEFINE_SCALAR_MASS_FUNC("__cosh_finite", "__xl_cosh")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("erff", "__xl_erff")
+TLI_DEFINE_SCALAR_MASS_FUNC("erf", "__xl_erf")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("erfcf", "__xl_erfcf")
+TLI_DEFINE_SCALAR_MASS_FUNC("erfc", "__xl_erfc")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("expf", "__xl_expf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__expf_finite", "__xl_expf")
+TLI_DEFINE_SCALAR_MASS_FUNC("exp", "__xl_exp")
+TLI_DEFINE_SCALAR_MASS_FUNC("__exp_finite", "__xl_exp")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("expm1f", "__xl_expm1f")
+TLI_DEFINE_SCALAR_MASS_FUNC("expm1", "__xl_expm1")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("hypotf", "__xl_hypotf")
+TLI_DEFINE_SCALAR_MASS_FUNC("hypot", "__xl_hypot")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("lgammaf", "__xl_lgammaf")
+TLI_DEFINE_SCALAR_MASS_FUNC("lgamma", "__xl_lgamma")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("logf", "__xl_logf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__logf_finite", "__xl_logf")
+TLI_DEFINE_SCALAR_MASS_FUNC("log", "__xl_log")
+TLI_DEFINE_SCALAR_MASS_FUNC("__log_finite", "__xl_log")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("log10f", "__xl_log10f")
+TLI_DEFINE_SCALAR_MASS_FUNC("__log10f_finite", "__xl_log10f")
+TLI_DEFINE_SCALAR_MASS_FUNC("log10", "__xl_log10")
+TLI_DEFINE_SCALAR_MASS_FUNC("__log10_finite", "__xl_log10")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("log1pf", "__xl_log1pf")
+TLI_DEFINE_SCALAR_MASS_FUNC("log1p", "__xl_log1p")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("powf", "__xl_powf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__powf_finite", "__xl_powf")
+TLI_DEFINE_SCALAR_MASS_FUNC("pow", "__xl_pow")
+TLI_DEFINE_SCALAR_MASS_FUNC("__pow_finite", "__xl_pow")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("rsqrt", "__xl_rsqrt")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("sinf", "__xl_sinf")
+TLI_DEFINE_SCALAR_MASS_FUNC("sin", "__xl_sin")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("sinhf", "__xl_sinhf")
+TLI_DEFINE_SCALAR_MASS_FUNC("__sinhf_finite", "__xl_sinhf")
+TLI_DEFINE_SCALAR_MASS_FUNC("sinh", "__xl_sinh")
+TLI_DEFINE_SCALAR_MASS_FUNC("__sinh_finite", "__xl_sinh")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("sqrt", "__xl_sqrt")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("tanf", "__xl_tanf")
+TLI_DEFINE_SCALAR_MASS_FUNC("tan", "__xl_tan")
+
+TLI_DEFINE_SCALAR_MASS_FUNC("tanhf", "__xl_tanhf")
+TLI_DEFINE_SCALAR_MASS_FUNC("tanh", "__xl_tanh")
+
+#undef TLI_DEFINE_SCALAR_MASS_FUNCS
+#undef TLI_DEFINE_SCALAR_MASS_FUNC
diff --git a/llvm/include/llvm/CodeGen/CommandFlags.h b/llvm/include/llvm/CodeGen/CommandFlags.h
--- a/llvm/include/llvm/CodeGen/CommandFlags.h
+++ b/llvm/include/llvm/CodeGen/CommandFlags.h
@@ -62,6 +62,8 @@
 
 bool getEnableNoSignedZerosFPMath();
 
+bool getEnableApproxFuncFPMath();
+
 bool getEnableNoTrappingFPMath();
 
 DenormalMode::DenormalModeKind getDenormalFPMath();
diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td
--- a/llvm/include/llvm/IR/Attributes.td
+++ b/llvm/include/llvm/IR/Attributes.td
@@ -294,6 +294,7 @@
 def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
 def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
 def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">;
+def ApproxFuncFPMath : StrBoolAttr<"approx-func-fp-math">;
 def NoSignedZerosFPMath : StrBoolAttr<"no-signed-zeros-fp-math">;
 def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">;
 def NoJumpTables : StrBoolAttr<"no-jump-tables">;
@@ -333,6 +334,7 @@
 def : MergeRule<"setAND<LessPreciseFPMADAttr>">;
 def : MergeRule<"setAND<NoInfsFPMathAttr>">;
 def : MergeRule<"setAND<NoNansFPMathAttr>">;
+def : MergeRule<"setAND<ApproxFuncFPMathAttr>">;
 def : MergeRule<"setAND<NoSignedZerosFPMathAttr>">;
 def : MergeRule<"setAND<UnsafeFPMathAttr>">;
 def : MergeRule<"setOR<NoImplicitFloatAttr>">;
diff --git a/llvm/lib/CodeGen/CommandFlags.cpp b/llvm/lib/CodeGen/CommandFlags.cpp
--- a/llvm/lib/CodeGen/CommandFlags.cpp
+++ b/llvm/lib/CodeGen/CommandFlags.cpp
@@ -58,6 +58,7 @@
 CGOPT(bool, EnableNoInfsFPMath)
 CGOPT(bool, EnableNoNaNsFPMath)
 CGOPT(bool, EnableNoSignedZerosFPMath)
+CGOPT(bool, EnableApproxFuncFPMath)
 CGOPT(bool, EnableNoTrappingFPMath)
 CGOPT(bool, EnableAIXExtendedAltivecABI)
 CGOPT(DenormalMode::DenormalModeKind, DenormalFPMath)
@@ -219,6 +220,12 @@
       cl::init(false));
   CGBINDOPT(EnableNoSignedZerosFPMath);
 
+  static cl::opt<bool> EnableApproxFuncFPMath(
+      "enable-approx-func-fp-math",
+      cl::desc("Enable FP math optimizations that assume approx func"),
+      cl::init(false));
+  CGBINDOPT(EnableApproxFuncFPMath);
+
   static cl::opt<bool> EnableNoTrappingFPMath(
       "enable-no-trapping-fp-math",
       cl::desc("Enable setting the FP exceptions build "
@@ -500,6 +507,7 @@
   Options.NoInfsFPMath = getEnableNoInfsFPMath();
   Options.NoNaNsFPMath = getEnableNoNaNsFPMath();
   Options.NoSignedZerosFPMath = getEnableNoSignedZerosFPMath();
+  Options.ApproxFuncFPMath = getEnableApproxFuncFPMath();
   Options.NoTrappingFPMath = getEnableNoTrappingFPMath();
 
   DenormalMode::DenormalModeKind DenormKind = getDenormalFPMath();
@@ -656,6 +664,7 @@
   HANDLE_BOOL_ATTR(EnableNoInfsFPMathView, "no-infs-fp-math");
   HANDLE_BOOL_ATTR(EnableNoNaNsFPMathView, "no-nans-fp-math");
   HANDLE_BOOL_ATTR(EnableNoSignedZerosFPMathView, "no-signed-zeros-fp-math");
+  HANDLE_BOOL_ATTR(EnableApproxFuncFPMathView, "approx-func-fp-math");
 
   if (DenormalFPMathView->getNumOccurrences() > 0 &&
       !F.hasFnAttribute("denormal-fp-math")) {
diff --git a/llvm/lib/Target/PowerPC/CMakeLists.txt b/llvm/lib/Target/PowerPC/CMakeLists.txt
--- a/llvm/lib/Target/PowerPC/CMakeLists.txt
+++ b/llvm/lib/Target/PowerPC/CMakeLists.txt
@@ -55,6 +55,7 @@
   PPCExpandISEL.cpp
   PPCPreEmitPeephole.cpp
   PPCLowerMASSVEntries.cpp
+  PPCGenScalarMASSEntries.cpp
   GISel/PPCCallLowering.cpp
   GISel/PPCRegisterBankInfo.cpp
   GISel/PPCLegalizerInfo.cpp
diff --git a/llvm/lib/Target/PowerPC/PPC.h b/llvm/lib/Target/PowerPC/PPC.h
--- a/llvm/lib/Target/PowerPC/PPC.h
+++ b/llvm/lib/Target/PowerPC/PPC.h
@@ -84,6 +84,10 @@
   void initializePPCLowerMASSVEntriesPass(PassRegistry &);
   extern char &PPCLowerMASSVEntriesID;
 
+  ModulePass *createPPCGenScalarMASSEntriesPass();
+  void initializePPCGenScalarMASSEntriesPass(PassRegistry &);
+  extern char &PPCGenScalarMASSEntriesID;
+
   InstructionSelector *
   createPPCInstructionSelector(const PPCTargetMachine &, const PPCSubtarget &,
                                const PPCRegisterBankInfo &);
diff --git a/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp b/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp
@@ -0,0 +1,141 @@
+//===-- PPCGenScalarMASSEntries.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation converts standard math functions into their
+// corresponding MASS (scalar) entries for PowerPC targets.
+// Following is an example of such a conversion:
+//     tanh ---> __xl_tanh (or __xl_tanh_finite when nnan/ninf/nsz also hold)
+// Such lowering is legal when the 'afn' fast-math flag is present.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPC.h"
+#include "PPCSubtarget.h"
+#include "PPCTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+
+#define DEBUG_TYPE "ppc-gen-scalar-mass"
+
+using namespace llvm;
+
+namespace {
+
+class PPCGenScalarMASSEntries : public ModulePass {
+public:
+  static char ID;
+
+  PPCGenScalarMASSEntries() : ModulePass(ID) {
+    ScalarMASSFuncs = {
+#define TLI_DEFINE_SCALAR_MASS_FUNCS
+#include "llvm/Analysis/ScalarFuncs.def"
+    };
+  }
+
+  bool runOnModule(Module &M) override;
+
+  StringRef getPassName() const override {
+    return "PPC Generate Scalar MASS Entries";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<TargetTransformInfoWrapperPass>();
+  }
+
+private:
+  std::map<StringRef, StringRef> ScalarMASSFuncs;
+  bool isCandidateSafeToLower(const CallInst &CI) const;
+  bool isFiniteCallSafe(const CallInst &CI) const;
+  bool createScalarMASSCall(StringRef MASSEntry, CallInst &CI,
+                            Function &Func) const;
+};
+
+} // namespace
+
+// Returns true if the 'afn' fast-math flag is present on the call to the
+// math function.
+bool PPCGenScalarMASSEntries::isCandidateSafeToLower(const CallInst &CI) const {
+  return CI.hasApproxFunc();
+}
+
+// Returns true if 'nnan', 'ninf' and 'nsz' flags exist on the call instruction
+// with the math function
+bool PPCGenScalarMASSEntries::isFiniteCallSafe(const CallInst &CI) const {
+  // FIXME: no-errno and trapping-math need to be set for MASS conversion
+  // but they don't have IR representation.
+  return CI.hasNoNaNs() && CI.hasNoInfs() && CI.hasNoSignedZeros();
+}
+
+/// Lowers scalar math functions to scalar MASS functions.
+///     e.g.: tanh         --> __xl_tanh_finite or __xl_tanh
+/// Both the function prototype and its callsite are updated during lowering.
+bool PPCGenScalarMASSEntries::createScalarMASSCall(StringRef MASSEntry,
+                                                   CallInst &CI,
+                                                   Function &Func) const {
+  if (CI.use_empty())
+    return false;
+
+  Module *M = Func.getParent();
+  assert(M && "Expecting a valid Module");
+
+  std::string MASSEntryStr = MASSEntry.str();
+  if (isFiniteCallSafe(CI))
+    MASSEntryStr += "_finite";
+
+  FunctionCallee FCache = M->getOrInsertFunction(
+      MASSEntryStr, Func.getFunctionType(), Func.getAttributes());
+
+  CI.setCalledFunction(FCache);
+
+  return true;
+}
+
+bool PPCGenScalarMASSEntries::runOnModule(Module &M) {
+  bool Changed = false;
+
+  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+  if (!TPC || skipModule(M))
+    return false;
+
+  for (Function &Func : M) {
+    if (!Func.isDeclaration())
+      continue;
+
+    auto Iter = ScalarMASSFuncs.find(Func.getName());
+    if (Iter == ScalarMASSFuncs.end())
+      continue;
+
+    // The call to createScalarMASSCall() invalidates the iterator over users
+    // upon replacing the users. Precomputing the current list of users allows
+    // us to replace all the call sites.
+    SmallVector<User *, 4> TheUsers;
+    for (auto *User : Func.users())
+      TheUsers.push_back(User);
+
+    for (auto *User : TheUsers)
+      if (auto *CI = dyn_cast_or_null<CallInst>(User)) {
+        if (isCandidateSafeToLower(*CI))
+          Changed |= createScalarMASSCall(Iter->second, *CI, Func);
+      }
+  }
+
+  return Changed;
+}
+
+char PPCGenScalarMASSEntries::ID = 0;
+
+char &llvm::PPCGenScalarMASSEntriesID = PPCGenScalarMASSEntries::ID;
+
+INITIALIZE_PASS(PPCGenScalarMASSEntries, DEBUG_TYPE,
+                "Generate Scalar MASS entries", false, false)
+
+ModulePass *llvm::createPPCGenScalarMASSEntriesPass() {
+  return new PPCGenScalarMASSEntries();
+}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1273,6 +1273,24 @@
     SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerToLibCall(const char *LibCallName, SDValue Op,
+                           SelectionDAG &DAG) const;
+    SDValue lowerLibCall(const char *LibCallFloatName,
+                         const char *LibCallDoubleName, SDValue Op,
+                         SelectionDAG &DAG) const;
+    bool isLowringToMASSFiniteSafe(SDValue Op) const;
+    bool isLowringToMASSSafe(SDValue Op) const;
+    SDValue lowerLibCallBase(const char *LibCallDoubleName,
+                             const char *LibCallFloatName,
+                             const char *LibCallDoubleNameFinite,
+                             const char *LibCallFloatNameFinite, SDValue Op,
+                             SelectionDAG &DAG) const;
+    SDValue lowerPow(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerSin(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerCos(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerLog(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerLog10(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerExp(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -379,6 +379,24 @@
   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
   setOperationAction(ISD::FREM , MVT::f32, Expand);
   setOperationAction(ISD::FPOW , MVT::f32, Expand);
+
+  // MASS transformation for LLVM intrinsics with replicating fast-math flags
+  // to be consistent with the PPCGenScalarMASSEntries pass
+  if (TM.getOptLevel() == CodeGenOpt::Aggressive){
+    setOperationAction(ISD::FSIN , MVT::f64, Custom);
+    setOperationAction(ISD::FCOS , MVT::f64, Custom);
+    setOperationAction(ISD::FPOW , MVT::f64, Custom);
+    setOperationAction(ISD::FLOG, MVT::f64, Custom);
+    setOperationAction(ISD::FLOG10, MVT::f64, Custom);
+    setOperationAction(ISD::FEXP, MVT::f64, Custom);
+    setOperationAction(ISD::FSIN , MVT::f32, Custom);
+    setOperationAction(ISD::FCOS , MVT::f32, Custom);
+    setOperationAction(ISD::FPOW , MVT::f32, Custom);
+    setOperationAction(ISD::FLOG, MVT::f32, Custom);
+    setOperationAction(ISD::FLOG10, MVT::f32, Custom);
+    setOperationAction(ISD::FEXP, MVT::f32, Custom);
+  }
+
   if (Subtarget.hasSPE()) {
     setOperationAction(ISD::FMA  , MVT::f64, Expand);
     setOperationAction(ISD::FMA  , MVT::f32, Expand);
@@ -11027,6 +11045,12 @@
 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
+  case ISD::FPOW:               return lowerPow(Op, DAG);
+  case ISD::FSIN:               return lowerSin(Op, DAG);
+  case ISD::FCOS:               return lowerCos(Op, DAG);
+  case ISD::FLOG:               return lowerLog(Op, DAG);
+  case ISD::FLOG10:             return lowerLog10(Op, DAG);
+  case ISD::FEXP:               return lowerExp(Op, DAG);
   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
@@ -17686,6 +17710,106 @@
   return false;
 }
 
+// Emits a tail call to the named MASS library entry, forwarding the operands
+// of the math node Op as the call arguments and returning the call's result
+// value.
+SDValue PPCTargetLowering::lowerToLibCall(const char *LibCallName, SDValue Op,
+                                          SelectionDAG &DAG) const {
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  TargetLowering::CallLoweringInfo CLI(DAG);
+  EVT RetVT = Op.getValueType();
+  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+  SDValue Callee =
+      DAG.getExternalSymbol(LibCallName, TLI.getPointerTy(DAG.getDataLayout()));
+  bool SignExtend = TLI.shouldSignExtendTypeInLibCall(RetVT, false);
+
+  // Marshal the operands of the math node into the call's argument list.
+  // Without this the libcall would be emitted with an empty argument list.
+  TargetLowering::ArgListTy Args;
+  TargetLowering::ArgListEntry Entry;
+  for (const SDValue &N : Op->op_values()) {
+    EVT ArgVT = N.getValueType();
+    Entry.Node = N;
+    Entry.Ty = ArgVT.getTypeForEVT(*DAG.getContext());
+    Entry.IsSExt = TLI.shouldSignExtendTypeInLibCall(ArgVT, SignExtend);
+    Entry.IsZExt = !Entry.IsSExt;
+    Args.push_back(Entry);
+  }
+
+  CLI.setDebugLoc(SDLoc(Op))
+      .setChain(DAG.getEntryNode())
+      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
+      .setTailCall(true)
+      .setSExtResult(SignExtend)
+      .setZExtResult(!SignExtend)
+      .setIsPostTypeLegalization(true);
+  return TLI.LowerCallTo(CLI).first;
+}
+
+SDValue PPCTargetLowering::lowerLibCall(const char *LibCallFloatName,
+                                        const char *LibCallDoubleName,
+                                        SDValue Op, SelectionDAG &DAG) const {
+  if (Op.getValueType() == MVT::f32)
+    return lowerToLibCall(LibCallFloatName, Op, DAG);
+
+  if (Op.getValueType() == MVT::f64)
+    return lowerToLibCall(LibCallDoubleName, Op, DAG);
+
+  return SDValue();
+}
+
+bool PPCTargetLowering::isLowringToMASSFiniteSafe(SDValue Op) const {
+  SDNodeFlags Flags = Op.getNode()->getFlags();
+  return isLowringToMASSSafe(Op) && Flags.hasNoSignedZeros() &&
+         Flags.hasNoNaNs() && Flags.hasNoInfs();
+}
+
+bool PPCTargetLowering::isLowringToMASSSafe(SDValue Op) const {
+  return Op.getNode()->getFlags().hasApproximateFuncs();
+}
+
+SDValue PPCTargetLowering::lowerLibCallBase(const char *LibCallDoubleName,
+                                            const char *LibCallFloatName,
+                                            const char *LibCallDoubleNameFinite,
+                                            const char *LibCallFloatNameFinite,
+                                            SDValue Op,
+                                            SelectionDAG &DAG) const {
+  if (!isLowringToMASSSafe(Op))
+    return SDValue();
+
+  if (!isLowringToMASSFiniteSafe(Op))
+    return lowerLibCall(LibCallFloatName, LibCallDoubleName, Op, DAG);
+
+  return lowerLibCall(LibCallFloatNameFinite, LibCallDoubleNameFinite, Op, DAG);
+}
+
+SDValue PPCTargetLowering::lowerPow(SDValue Op, SelectionDAG &DAG) const {
+  return lowerLibCallBase("__xl_pow", "__xl_powf", "__xl_pow_finite",
+                          "__xl_powf_finite", Op, DAG);
+}
+
+SDValue PPCTargetLowering::lowerSin(SDValue Op, SelectionDAG &DAG) const {
+  return lowerLibCallBase("__xl_sin", "__xl_sinf", "__xl_sin_finite",
+                          "__xl_sinf_finite", Op, DAG);
+}
+
+SDValue PPCTargetLowering::lowerCos(SDValue Op, SelectionDAG &DAG) const {
+  return lowerLibCallBase("__xl_cos", "__xl_cosf", "__xl_cos_finite",
+                          "__xl_cosf_finite", Op, DAG);
+}
+
+SDValue PPCTargetLowering::lowerLog(SDValue Op, SelectionDAG &DAG) const {
+  return lowerLibCallBase("__xl_log", "__xl_logf", "__xl_log_finite",
+                          "__xl_logf_finite", Op, DAG);
+}
+
+SDValue PPCTargetLowering::lowerLog10(SDValue Op, SelectionDAG &DAG) const {
+  return lowerLibCallBase("__xl_log10", "__xl_log10f", "__xl_log10_finite",
+                          "__xl_log10f_finite", Op, DAG);
+}
+
+SDValue PPCTargetLowering::lowerExp(SDValue Op, SelectionDAG &DAG) const {
+  return lowerLibCallBase("__xl_exp", "__xl_expf", "__xl_exp_finite",
+                          "__xl_expf_finite", Op, DAG);
+}
+
 // If we happen to match to an aligned D-Form, check if the Frame Index is
 // adequately aligned. If it is not, reset the mode to match to X-Form.
 static void setXFormForUnalignedFI(SDValue N, unsigned Flags,
diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
--- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -123,6 +123,7 @@
   initializePPCTLSDynamicCallPass(PR);
   initializePPCMIPeepholePass(PR);
   initializePPCLowerMASSVEntriesPass(PR);
+  initializePPCGenScalarMASSEntriesPass(PR);
   initializePPCExpandAtomicPseudoPass(PR);
   initializeGlobalISel(PR);
 }
@@ -429,6 +430,12 @@
   // Lower generic MASSV routines to PowerPC subtarget-specific entries.
   addPass(createPPCLowerMASSVEntriesPass());
 
+  // Generate PowerPC target-specific entries for scalar math functions
+  // that are available in the IBM MASS (scalar) library.
+  if (TM->getOptLevel() == CodeGenOpt::Aggressive) {
+    addPass(createPPCGenScalarMASSEntriesPass());
+  }
+
   // If explicitly requested, add explicit data prefetch intrinsics.
   if (EnablePrefetch.getNumOccurrences() > 0)
     addPass(createLoopDataPrefetchPass());
diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp
--- a/llvm/lib/Target/TargetMachine.cpp
+++ b/llvm/lib/Target/TargetMachine.cpp
@@ -63,6 +63,7 @@
   RESET_OPTION(NoInfsFPMath, "no-infs-fp-math");
   RESET_OPTION(NoNaNsFPMath, "no-nans-fp-math");
   RESET_OPTION(NoSignedZerosFPMath, "no-signed-zeros-fp-math");
+  RESET_OPTION(ApproxFuncFPMath, "approx-func-fp-math");
 }
 
 /// Returns the code generation relocation model. The choices are static, PIC,
diff --git a/llvm/test/CodeGen/PowerPC/O3-pipeline.ll b/llvm/test/CodeGen/PowerPC/O3-pipeline.ll
--- a/llvm/test/CodeGen/PowerPC/O3-pipeline.ll
+++ b/llvm/test/CodeGen/PowerPC/O3-pipeline.ll
@@ -21,6 +21,7 @@
 ; CHECK-NEXT:       Convert i1 constants to i32/i64 if they are returned
 ; CHECK-NEXT:       Expand Atomic instructions
 ; CHECK-NEXT:     PPC Lower MASS Entries
+; CHECK-NEXT:     PPC Generate Scalar MASS Entries
 ; CHECK-NEXT:     FunctionPass Manager
 ; CHECK-NEXT:       Dominator Tree Construction
 ; CHECK-NEXT:       Natural Loop Information
@@ -207,4 +208,4 @@
 
 define void @f() {
   ret void
-}
\ No newline at end of file
+}
diff --git a/llvm/test/CodeGen/PowerPC/lower-intrinsics-afn-mass.ll b/llvm/test/CodeGen/PowerPC/lower-intrinsics-afn-mass.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/lower-intrinsics-afn-mass.ll
@@ -0,0 +1,148 @@
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck %s
+
+declare float @llvm.cos.f32(float)
+declare float @llvm.exp.f32(float)
+declare float @llvm.log10.f32(float)
+declare float @llvm.log.f32(float)
+declare float @llvm.pow.f32(float, float)
+declare float @llvm.rint.f32(float)
+declare float @llvm.sin.f32(float)
+declare double @llvm.cos.f64(double)
+declare double @llvm.exp.f64(double)
+declare double @llvm.log.f64(double)
+declare double @llvm.log10.f64(double)
+declare double @llvm.pow.f64(double, double)
+declare double @llvm.sin.f64(double)
+
+; With afn flag specified per-function
+define float @cosf_f32(float %a) #1 {
+; CHECK-LABEL: cosf_f32
+; CHECK: __xl_cosf
+; CHECK: blr
+entry:
+  %0 = tail call afn float @llvm.cos.f32(float %a)
+  ret float %0
+}
+
+; With afn flag specified per-function
+define float @expf_f32(float %a) #1 {
+; CHECK-LABEL: expf_f32
+; CHECK: __xl_expf
+; CHECK: blr
+entry:
+  %0 = tail call afn float @llvm.exp.f32(float %a)
+  ret float %0
+}
+
+; With afn flag specified per-function
+define float @log10f_f32(float %a) #1 {
+; CHECK-LABEL: log10f_f32
+; CHECK: __xl_log10f
+; CHECK: blr
+entry:
+  %0 = tail call afn float @llvm.log10.f32(float %a)
+  ret float %0
+}
+
+; With afn flag specified per-function
+define float @logf_f32(float %a) #1 {
+; CHECK-LABEL: logf_f32
+; CHECK: __xl_logf
+; CHECK: blr
+entry:
+  %0 = tail call afn float @llvm.log.f32(float %a)
+  ret float %0
+}
+
+; With afn flag specified per-function
+define float @powf_f32(float %a, float %b) #1 {
+; CHECK-LABEL: powf_f32
+; CHECK: __xl_powf
+; CHECK: blr
+entry:
+  %0 = tail call afn float @llvm.pow.f32(float %a, float %b)
+  ret float %0
+}
+
+; With afn flag specified per-function
+define float @rintf_f32(float %a) #1 {
+; CHECK-LABEL: rintf_f32
+; CHECK-NOT: bl __xl_rintf
+; CHECK: blr
+entry:
+  %0 = tail call afn float @llvm.rint.f32(float %a)
+  ret float %0
+}
+
+; With afn flag specified per-function
+define float @sinf_f32(float %a) #1 {
+; CHECK-LABEL: sinf_f32
+; CHECK: __xl_sinf
+; CHECK: blr
+entry:
+  %0 = tail call afn float @llvm.sin.f32(float %a)
+  ret float %0
+}
+
+; With afn flag specified per-function
+define double @cos_f64(double %a) #1 {
+; CHECK-LABEL: cos_f64
+; CHECK: __xl_cos
+; CHECK: blr
+entry:
+  %0 = tail call afn double @llvm.cos.f64(double %a)
+  ret double %0
+}
+
+; With afn flag specified per-function
+define double @exp_f64(double %a) #1 {
+; CHECK-LABEL: exp_f64
+; CHECK: __xl_exp
+; CHECK: blr
+entry:
+  %0 = tail call afn double @llvm.exp.f64(double %a)
+  ret double %0
+}
+
+; With afn flag specified per-function
+define double @log_f64(double %a) #1 {
+; CHECK-LABEL: log_f64
+; CHECK: __xl_log
+; CHECK: blr
+entry:
+  %0 = tail call afn double @llvm.log.f64(double %a)
+  ret double %0
+}
+
+; With afn flag specified per-function
+define double @log10_f64(double %a) #1 {
+; CHECK-LABEL: log10_f64
+; CHECK: __xl_log10
+; CHECK: blr
+entry:
+  %0 = tail call afn double @llvm.log10.f64(double %a)
+  ret double %0
+}
+
+; With afn flag specified per-function
+define double @pow_f64(double %a, double %b) #1 {
+; CHECK-LABEL: pow_f64
+; CHECK: __xl_pow
+; CHECK: blr
+entry:
+  %0 = tail call afn double @llvm.pow.f64(double %a, double %b)
+  ret double %0
+}
+
+; With afn flag specified per-function
+define double @sin_f64(double %a) #1 {
+; CHECK-LABEL: sin_f64
+; CHECK: __xl_sin
+; CHECK: blr
+entry:
+  %0 = tail call afn double @llvm.sin.f64(double %a)
+  ret double %0
+}
+
+attributes #1 = { "approx-func-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/lower-intrinsics-fast-mass.ll b/llvm/test/CodeGen/PowerPC/lower-intrinsics-fast-mass.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/lower-intrinsics-fast-mass.ll
@@ -0,0 +1,148 @@
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck %s
+
+declare float @llvm.cos.f32(float)
+declare float @llvm.exp.f32(float)
+declare float @llvm.log10.f32(float)
+declare float @llvm.log.f32(float)
+declare float @llvm.pow.f32(float, float)
+declare float @llvm.rint.f32(float)
+declare float @llvm.sin.f32(float)
+declare double @llvm.cos.f64(double)
+declare double @llvm.exp.f64(double)
+declare double @llvm.log.f64(double)
+declare double @llvm.log10.f64(double)
+declare double @llvm.pow.f64(double, double)
+declare double @llvm.sin.f64(double)
+
+; With fast-math flag specified per-function
+define float @cosf_f32(float %a) #1 {
+; CHECK-LABEL: cosf_f32
+; CHECK: __xl_cosf_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz float @llvm.cos.f32(float %a)
+  ret float %0
+}
+
+; With fast-math flag specified per-function
+define float @expf_f32(float %a) #1 {
+; CHECK-LABEL: expf_f32
+; CHECK: __xl_expf_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz float @llvm.exp.f32(float %a)
+  ret float %0
+}
+
+; With fast-math flag specified per-function
+define float @log10f_f32(float %a) #1 {
+; CHECK-LABEL: log10f_f32
+; CHECK: __xl_log10f_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz float @llvm.log10.f32(float %a)
+  ret float %0
+}
+
+; With fast-math flag specified per-function
+define float @logf_f32(float %a) #1 {
+; CHECK-LABEL: logf_f32
+; CHECK: __xl_logf_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz float @llvm.log.f32(float %a)
+  ret float %0
+}
+
+; With fast-math flag specified per-function
+define float @powf_f32(float %a, float %b) #1 {
+; CHECK-LABEL: powf_f32
+; CHECK: __xl_powf_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz float @llvm.pow.f32(float %a, float %b)
+  ret float %0
+}
+
+; With fast-math flag specified per-function
+define float @rintf_f32(float %a) #1 {
+; CHECK-LABEL: rintf_f32
+; CHECK-NOT: bl __xl_rintf_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz float @llvm.rint.f32(float %a)
+  ret float %0
+}
+
+; With fast-math flag specified per-function
+define float @sinf_f32(float %a) #1 {
+; CHECK-LABEL: sinf_f32
+; CHECK: __xl_sinf_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz float @llvm.sin.f32(float %a)
+  ret float %0
+}
+
+; With fast-math flag specified per-function
+define double @cos_f64(double %a) #1 {
+; CHECK-LABEL: cos_f64
+; CHECK: __xl_cos_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz double @llvm.cos.f64(double %a)
+  ret double %0
+}
+
+; With fast-math flag specified per-function
+define double @exp_f64(double %a) #1 {
+; CHECK-LABEL: exp_f64
+; CHECK: __xl_exp_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz double @llvm.exp.f64(double %a)
+  ret double %0
+}
+
+; With fast-math flag specified per-function
+define double @log_f64(double %a) #1 {
+; CHECK-LABEL: log_f64
+; CHECK: __xl_log_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz double @llvm.log.f64(double %a)
+  ret double %0
+}
+
+; With fast-math flag specified per-function
+define double @log10_f64(double %a) #1 {
+; CHECK-LABEL: log10_f64
+; CHECK: __xl_log10_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz double @llvm.log10.f64(double %a)
+  ret double %0
+}
+
+; With fast-math flag specified per-function
+define double @pow_f64(double %a, double %b) #1 {
+; CHECK-LABEL: pow_f64
+; CHECK: __xl_pow_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz double @llvm.pow.f64(double %a, double %b)
+  ret double %0
+}
+
+; With fast-math flag specified per-function
+define double @sin_f64(double %a) #1 {
+; CHECK-LABEL: sin_f64
+; CHECK: __xl_sin_finite
+; CHECK: blr
+entry:
+  %0 = tail call nnan ninf afn nsz double @llvm.sin.f64(double %a)
+  ret double %0
+}
+
+attributes #1 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "approx-func-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/lower-intrinsics-mass-aix.ll b/llvm/test/CodeGen/PowerPC/lower-intrinsics-mass-aix.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/lower-intrinsics-mass-aix.ll
@@ -0,0 +1,133 @@
+; RUN: llc -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck %s
+
+declare float @llvm.cos.f32(float)
+declare double @llvm.cos.f64(double)
+declare float @llvm.exp.f32(float)
+declare double @llvm.exp.f64(double)
+declare float @llvm.log10.f32(float)
+declare double @llvm.log10.f64(double)
+declare float @llvm.log.f32(float)
+declare double @llvm.log.f64(double)
+declare float @llvm.pow.f32(float, float)
+declare double @llvm.pow.f64(double, double)
+declare float @llvm.rint.f32(float)
+declare float @llvm.sin.f32(float)
+declare double @llvm.sin.f64(double)
+
+define float @cosf_f32(float %a) {
+; CHECK-LABEL: cosf_f32
+; CHECK-NOT: bl __xl_cosf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.cos.f32(float %a)
+  ret float %0
+}
+
+define double @cos_f64(double %a) {
+; CHECK-LABEL: cos_f64
+; CHECK-NOT: bl __xl_cos
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.cos.f64(double %a)
+  ret double %0
+}
+
+define float @expf_f32(float %a) {
+; CHECK-LABEL: expf_f32
+; CHECK-NOT: bl __xl_expf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.exp.f32(float %a)
+  ret float %0
+}
+
+define double @exp_f64(double %a) {
+; CHECK-LABEL: exp_f64
+; CHECK-NOT: bl __xl_exp
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.exp.f64(double %a)
+  ret double %0
+}
+
+define float @log10f_f32(float %a) {
+; CHECK-LABEL: log10f_f32
+; CHECK-NOT: bl __xl_log10f
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.log10.f32(float %a)
+  ret float %0
+}
+
+define double @log_f64(double %a) {
+; CHECK-LABEL: log_f64
+; CHECK-NOT: bl __xl_log
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.log.f64(double %a)
+  ret double %0
+}
+
+define float @logf_f32(float %a) {
+; CHECK-LABEL: logf_f32
+; CHECK-NOT: bl __xl_logf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.log.f32(float %a)
+  ret float %0
+}
+
+define double @log10_f64(double %a) {
+; CHECK-LABEL: log10_f64
+; CHECK-NOT: bl __xl_log10
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.log10.f64(double %a)
+  ret double %0
+}
+
+define float @powf_f32(float %a, float %b) {
+; CHECK-LABEL: powf_f32
+; CHECK-NOT: bl __xl_powf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.pow.f32(float %a, float %b)
+  ret float %0
+}
+
+define double @pow_f64(double %a, double %b) {
+; CHECK-LABEL: pow_f64
+; CHECK-NOT: bl __xl_pow
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.pow.f64(double %a, double %b)
+  ret double %0
+}
+
+define float @rintf_f32(float %a) {
+; CHECK-LABEL: rintf_f32
+; CHECK-NOT: bl __xl_rintf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.rint.f32(float %a)
+  ret float %0
+}
+
+define float @sinf_f32(float %a) {
+; CHECK-LABEL: sinf_f32
+; CHECK-NOT: bl __xl_sinf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.sin.f32(float %a)
+  ret float %0
+}
+
+define double @sin_f64(double %a) {
+; CHECK-LABEL: sin_f64
+; CHECK-NOT: bl __xl_sin
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.sin.f64(double %a)
+  ret double %0
+}
+
diff --git a/llvm/test/CodeGen/PowerPC/lower-intrinsics-nofast-mass.ll b/llvm/test/CodeGen/PowerPC/lower-intrinsics-nofast-mass.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/lower-intrinsics-nofast-mass.ll
@@ -0,0 +1,146 @@
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+
+declare float @llvm.cos.f32(float)
+declare float @llvm.exp.f32(float)
+declare float @llvm.log10.f32(float)
+declare float @llvm.log.f32(float)
+declare float @llvm.pow.f32(float, float)
+declare float @llvm.rint.f32(float)
+declare float @llvm.sin.f32(float)
+declare double @llvm.cos.f64(double)
+declare double @llvm.exp.f64(double)
+declare double @llvm.log.f64(double)
+declare double @llvm.log10.f64(double)
+declare double @llvm.pow.f64(double, double)
+declare double @llvm.sin.f64(double)
+
+
+; With no fast math flag specified per-function
+define float @cosf_f32_nofast(float %a) {
+; CHECK-LABEL: cosf_f32_nofast
+; CHECK-NOT: bl __xl_cosf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.cos.f32(float %a)
+  ret float %0
+}
+
+; With no fast math flag specified per-function
+define float @expf_f32_nofast(float %a) {
+; CHECK-LABEL: expf_f32_nofast
+; CHECK-NOT: bl __xl_expf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.exp.f32(float %a)
+  ret float %0
+}
+
+; With no fast math flag specified per-function
+define float @log10f_f32_nofast(float %a) {
+; CHECK-LABEL: log10f_f32_nofast
+; CHECK-NOT: bl __xl_log10f
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.log10.f32(float %a)
+  ret float %0
+}
+
+; With no fast math flag specified per-function
+define float @logf_f32_nofast(float %a) {
+; CHECK-LABEL: logf_f32_nofast
+; CHECK-NOT: bl __xl_logf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.log.f32(float %a)
+  ret float %0
+}
+
+; With no fast math flag specified per-function
+define float @powf_f32_nofast(float %a, float %b) {
+; CHECK-LABEL: powf_f32_nofast
+; CHECK-NOT: bl __xl_powf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.pow.f32(float %a, float %b)
+  ret float %0
+}
+
+; With no fast math flag specified per-function
+define float @rintf_f32_nofast(float %a) {
+; CHECK-LABEL: rintf_f32_nofast
+; CHECK-NOT: bl __xl_rintf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.rint.f32(float %a)
+  ret float %0
+}
+
+; With no fast math flag specified per-function
+define float @sinf_f32_nofast(float %a) {
+; CHECK-LABEL: sinf_f32_nofast
+; CHECK-NOT: bl __xl_sinf
+; CHECK: blr
+entry:
+  %0 = tail call float @llvm.sin.f32(float %a)
+  ret float %0
+}
+
+; With no fast math flag specified per-function
+define double @cos_f64_nofast(double %a) {
+; CHECK-LABEL: cos_f64_nofast
+; CHECK-NOT: bl __xl_cos
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.cos.f64(double %a)
+  ret double %0
+}
+
+; With no fast math flag specified per-function
+define double @exp_f64_nofast(double %a) {
+; CHECK-LABEL: exp_f64_nofast
+; CHECK-NOT: bl __xl_exp
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.exp.f64(double %a)
+  ret double %0
+}
+
+; With no fast math flag specified per-function
+define double @log_f64_nofast(double %a) {
+; CHECK-LABEL: log_f64_nofast
+; CHECK-NOT: bl __xl_log
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.log.f64(double %a)
+  ret double %0
+}
+
+; With no fast math flag specified per-function
+define double @log10_f64_nofast(double %a) {
+; CHECK-LABEL: log10_f64_nofast
+; CHECK-NOT: bl __xl_log10
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.log10.f64(double %a)
+  ret double %0
+}
+
+; With no fast math flag specified per-function
+define double @pow_f64_nofast(double %a, double %b) {
+; CHECK-LABEL: pow_f64_nofast
+; CHECK-NOT: bl __xl_pow
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.pow.f64(double %a, double %b)
+  ret double %0
+}
+
+; With no fast math flag specified per-function
+define double @sin_f64_nofast(double %a) {
+; CHECK-LABEL: sin_f64_nofast
+; CHECK-NOT: bl __xl_sin
+; CHECK: blr
+entry:
+  %0 = tail call double @llvm.sin.f64(double %a)
+  ret double %0
+}
diff --git a/llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll b/llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll
@@ -0,0 +1,790 @@
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck %s
+
+declare float @acosf (float);
+declare float @acoshf (float);
+declare float @asinf (float);
+declare float @asinhf (float);
+declare float @atan2f (float, float);
+declare float @atanf (float);
+declare float @atanhf (float);
+declare float @cbrtf (float);
+declare float @copysignf (float, float);
+declare float @cosf (float);
+declare float @coshf (float);
+declare float @erfcf (float);
+declare float @erff (float);
+declare float @expf (float);
+declare float @expm1f (float);
+declare float @hypotf (float, float);
+declare float @lgammaf (float);
+declare float @log10f (float);
+declare float @log1pf (float);
+declare float @logf (float);
+declare float @powf (float, float);
+declare float @rintf (float);
+declare float @sinf (float);
+declare float @sinhf (float);
+declare float @tanf (float);
+declare float @tanhf (float);
+declare double @acos (double);
+declare double @acosh (double);
+declare double @anint (double);
+declare double @asin (double);
+declare double @asinh (double);
+declare double @atan (double);
+declare double @atan2 (double, double);
+declare double @atanh (double);
+declare double @cbrt (double);
+declare double @copysign (double, double);
+declare double @cos (double);
+declare double @cosh (double);
+declare double @cosisin (double);
+declare double @dnint (double);
+declare double @erf (double);
+declare double @erfc (double);
+declare double @exp (double);
+declare double @expm1 (double);
+declare double @hypot (double, double);
+declare double @lgamma (double);
+declare double @log (double);
+declare double @log10 (double);
+declare double @log1p (double);
+declare double @pow (double, double);
+declare double @rsqrt (double);
+declare double @sin (double);
+declare double @sincos (double);
+declare double @sinh (double);
+declare double @sqrt (double);
+declare double @tan (double);
+declare double @tanh (double);
+declare float @__acosf_finite (float);
+declare float @__acoshf_finite (float);
+declare float @__asinf_finite (float);
+declare float @__atan2f_finite (float, float);
+declare float @__atanhf_finite (float);
+declare float @__coshf_finite (float);
+declare float @__expf_finite (float);
+declare float @__logf_finite (float);
+declare float @__log10f_finite (float);
+declare float @__powf_finite (float, float);
+declare float @__sinhf_finite (float);
+declare double @__acos_finite (double);
+declare double @__acosh_finite (double);
+declare double @__asin_finite (double);
+declare double @__atan2_finite (double, double);
+declare double @__atanh_finite (double);
+declare double @__cosh_finite (double);
+declare double @__exp_finite (double);
+declare double @__log_finite (double);
+declare double @__log10_finite (double);
+declare double @__pow_finite (double, double);
+declare double @__sinh_finite (double);
+
+define float @acosf_f32(float %a) #0 {
+; CHECK-LABEL: acosf_f32
+; CHECK: __xl_acosf
+; CHECK: blr
+entry:
+  %call = tail call afn float @acosf(float %a)
+  ret float %call
+}
+
+define float @acoshf_f32(float %a) #0 {
+; CHECK-LABEL: acoshf_f32
+; CHECK: __xl_acoshf
+; CHECK: blr
+entry:
+  %call = tail call afn float @acoshf(float %a)
+  ret float %call
+}
+
+define float @asinf_f32(float %a) #0 {
+; CHECK-LABEL: asinf_f32
+; CHECK: __xl_asinf
+; CHECK: blr
+entry:
+  %call = tail call afn float @asinf(float %a)
+  ret float %call
+}
+
+define float @asinhf_f32(float %a) #0 {
+; CHECK-LABEL: asinhf_f32
+; CHECK: __xl_asinhf
+; CHECK: blr
+entry:
+  %call = tail call afn float @asinhf(float %a)
+  ret float %call
+}
+
+define float @atan2f_f32(float %a, float %b) #0 {
+; CHECK-LABEL: atan2f_f32
+; CHECK: __xl_atan2f
+; CHECK: blr
+entry:
+  %call = tail call afn float @atan2f(float %a, float %b)
+  ret float %call
+}
+
+define float @atanf_f32(float %a) #0 {
+; CHECK-LABEL: atanf_f32
+; CHECK: __xl_atanf
+; CHECK: blr
+entry:
+  %call = tail call afn float @atanf(float %a)
+  ret float %call
+}
+
+define float @atanhf_f32(float %a) #0 {
+; CHECK-LABEL: atanhf_f32
+; CHECK: __xl_atanhf
+; CHECK: blr
+entry:
+  %call = tail call afn float @atanhf(float %a)
+  ret float %call
+}
+
+define float @cbrtf_f32(float %a) #0 {
+; CHECK-LABEL: cbrtf_f32
+; CHECK: __xl_cbrtf
+; CHECK: blr
+entry:
+  %call = tail call afn float @cbrtf(float %a)
+  ret float %call
+}
+
+define float @copysignf_f32(float %a, float %b) #0 {
+; CHECK-LABEL: copysignf_f32
+; CHECK: copysignf
+; CHECK: blr
+entry:
+  %call = tail call afn float @copysignf(float %a, float %b)
+  ret float %call
+}
+
+define float @cosf_f32(float %a) #0 {
+; CHECK-LABEL: cosf_f32
+; CHECK: __xl_cosf
+; CHECK: blr
+entry:
+  %call = tail call afn float @cosf(float %a)
+  ret float %call
+}
+
+define float @coshf_f32(float %a) #0 {
+; CHECK-LABEL: coshf_f32
+; CHECK: __xl_coshf
+; CHECK: blr
+entry:
+  %call = tail call afn float @coshf(float %a)
+  ret float %call
+}
+
+define float @erfcf_f32(float %a) #0 {
+; CHECK-LABEL: erfcf_f32
+; CHECK: __xl_erfcf
+; CHECK: blr
+entry:
+  %call = tail call afn float @erfcf(float %a)
+  ret float %call
+}
+
+define float @erff_f32(float %a) #0 {
+; CHECK-LABEL: erff_f32
+; CHECK: __xl_erff
+; CHECK: blr
+entry:
+  %call = tail call afn float @erff(float %a)
+  ret float %call
+}
+
+define float @expf_f32(float %a) #0 {
+; CHECK-LABEL: expf_f32
+; CHECK: __xl_expf
+; CHECK: blr
+entry:
+  %call = tail call afn float @expf(float %a)
+  ret float %call
+}
+
+define float @expm1f_f32(float %a) #0 {
+; CHECK-LABEL: expm1f_f32
+; CHECK: __xl_expm1f
+; CHECK: blr
+entry:
+  %call = tail call afn float @expm1f(float %a)
+  ret float %call
+}
+
+define float @hypotf_f32(float %a, float %b) #0 {
+; CHECK-LABEL: hypotf_f32
+; CHECK: __xl_hypotf
+; CHECK: blr
+entry:
+  %call = tail call afn float @hypotf(float %a, float %b)
+  ret float %call
+}
+
+define float @lgammaf_f32(float %a) #0 {
+; CHECK-LABEL: lgammaf_f32
+; CHECK: __xl_lgammaf
+; CHECK: blr
+entry:
+  %call = tail call afn float @lgammaf(float %a)
+  ret float %call
+}
+
+define float @log10f_f32(float %a) #0 {
+; CHECK-LABEL: log10f_f32
+; CHECK: __xl_log10f
+; CHECK: blr
+entry:
+  %call = tail call afn float @log10f(float %a)
+  ret float %call
+}
+
+define float @log1pf_f32(float %a) #0 {
+; CHECK-LABEL: log1pf_f32
+; CHECK: __xl_log1pf
+; CHECK: blr
+entry:
+  %call = tail call afn float @log1pf(float %a)
+  ret float %call
+}
+
+define float @logf_f32(float %a) #0 {
+; CHECK-LABEL: logf_f32
+; CHECK: __xl_logf
+; CHECK: blr
+entry:
+  %call = tail call afn float @logf(float %a)
+  ret float %call
+}
+
+define float @powf_f32(float %a, float %b) #0 {
+; CHECK-LABEL: powf_f32
+; CHECK: __xl_powf
+; CHECK: blr
+entry:
+  %call = tail call afn float @powf(float %a, float %b)
+  ret float %call
+}
+
+define float @rintf_f32(float %a) #0 {
+; CHECK-LABEL: rintf_f32
+; CHECK-NOT: __xl_rintf
+; CHECK: blr
+entry:
+  %call = tail call afn float @rintf(float %a)
+  ret float %call
+}
+
+define float @sinf_f32(float %a) #0 {
+; CHECK-LABEL: sinf_f32
+; CHECK: __xl_sinf
+; CHECK: blr
+entry:
+  %call = tail call afn float @sinf(float %a)
+  ret float %call
+}
+
+define float @sinhf_f32(float %a) #0 {
+; CHECK-LABEL: sinhf_f32
+; CHECK: __xl_sinhf
+; CHECK: blr
+entry:
+  %call = tail call afn float @sinhf(float %a)
+  ret float %call
+}
+
+define float @tanf_f32(float %a) #0 {
+; CHECK-LABEL: tanf_f32
+; CHECK: __xl_tanf
+; CHECK: blr
+entry:
+  %call = tail call afn float @tanf(float %a)
+  ret float %call
+}
+
+define float @tanhf_f32(float %a) #0 {
+; CHECK-LABEL: tanhf_f32
+; CHECK: __xl_tanhf
+; CHECK: blr
+entry:
+  %call = tail call afn float @tanhf(float %a)
+  ret float %call
+}
+
+define double @acos_f64(double %a) #0 {
+; CHECK-LABEL: acos_f64
+; CHECK: __xl_acos
+; CHECK: blr
+entry:
+  %call = tail call afn double @acos(double %a)
+  ret double %call
+}
+
+define double @acosh_f64(double %a) #0 {
+; CHECK-LABEL: acosh_f64
+; CHECK: __xl_acosh
+; CHECK: blr
+entry:
+  %call = tail call afn double @acosh(double %a)
+  ret double %call
+}
+
+define double @anint_f64(double %a) #0 {
+; CHECK-LABEL: anint_f64
+; CHECK-NOT: __xl_anint
+; CHECK: blr
+entry:
+  %call = tail call afn double @anint(double %a)
+  ret double %call
+}
+
+define double @asin_f64(double %a) #0 {
+; CHECK-LABEL: asin_f64
+; CHECK: __xl_asin
+; CHECK: blr
+entry:
+  %call = tail call afn double @asin(double %a)
+  ret double %call
+}
+
+define double @asinh_f64(double %a) #0 {
+; CHECK-LABEL: asinh_f64
+; CHECK: __xl_asinh
+; CHECK: blr
+entry:
+  %call = tail call afn double @asinh(double %a)
+  ret double %call
+}
+
+define double @atan_f64(double %a) #0 {
+; CHECK-LABEL: atan_f64
+; CHECK: __xl_atan
+; CHECK: blr
+entry:
+  %call = tail call afn double @atan(double %a)
+  ret double %call
+}
+
+define double @atan2_f64(double %a, double %b) #0 {
+; CHECK-LABEL: atan2_f64
+; CHECK: __xl_atan2
+; CHECK: blr
+entry:
+  %call = tail call afn double @atan2(double %a, double %b)
+  ret double %call
+}
+
+define double @atanh_f64(double %a) #0 {
+; CHECK-LABEL: atanh_f64
+; CHECK: __xl_atanh
+; CHECK: blr
+entry:
+  %call = tail call afn double @atanh(double %a)
+  ret double %call
+}
+
+define double @cbrt_f64(double %a) #0 {
+; CHECK-LABEL: cbrt_f64
+; CHECK: __xl_cbrt
+; CHECK: blr
+entry:
+  %call = tail call afn double @cbrt(double %a)
+  ret double %call
+}
+
+define double @copysign_f64(double %a, double %b) #0 {
+; CHECK-LABEL: copysign_f64
+; CHECK: copysign
+; CHECK: blr
+entry:
+  %call = tail call afn double @copysign(double %a, double %b)
+  ret double %call
+}
+
+define double @cos_f64(double %a) #0 {
+; CHECK-LABEL: cos_f64
+; CHECK: __xl_cos
+; CHECK: blr
+entry:
+  %call = tail call afn double @cos(double %a)
+  ret double %call
+}
+
+define double @cosh_f64(double %a) #0 {
+; CHECK-LABEL: cosh_f64
+; CHECK: __xl_cosh
+; CHECK: blr
+entry:
+  %call = tail call afn double @cosh(double %a)
+  ret double %call
+}
+
+define double @cosisin_f64(double %a) #0 {
+; CHECK-LABEL: cosisin_f64
+; CHECK-NOT: __xl_cosisin
+; CHECK: blr
+entry:
+  %call = tail call afn double @cosisin(double %a)
+  ret double %call
+}
+
+define double @dnint_f64(double %a) #0 {
+; CHECK-LABEL: dnint_f64
+; CHECK-NOT: __xl_dnint
+; CHECK: blr
+entry:
+  %call = tail call afn double @dnint(double %a)
+  ret double %call
+}
+
+define double @erf_f64(double %a) #0 {
+; CHECK-LABEL: erf_f64
+; CHECK: __xl_erf
+; CHECK: blr
+entry:
+  %call = tail call afn double @erf(double %a)
+  ret double %call
+}
+
+define double @erfc_f64(double %a) #0 {
+; CHECK-LABEL: erfc_f64
+; CHECK: __xl_erfc
+; CHECK: blr
+entry:
+  %call = tail call afn double @erfc(double %a)
+  ret double %call
+}
+
+define double @exp_f64(double %a) #0 {
+; CHECK-LABEL: exp_f64
+; CHECK: __xl_exp
+; CHECK: blr
+entry:
+  %call = tail call afn double @exp(double %a)
+  ret double %call
+}
+
+define double @expm1_f64(double %a) #0 {
+; CHECK-LABEL: expm1_f64
+; CHECK: __xl_expm1
+; CHECK: blr
+entry:
+  %call = tail call afn double @expm1(double %a)
+  ret double %call
+}
+
+define double @hypot_f64(double %a, double %b) #0 {
+; CHECK-LABEL: hypot_f64
+; CHECK: __xl_hypot
+; CHECK: blr
+entry:
+  %call = tail call afn double @hypot(double %a, double %b)
+  ret double %call
+}
+
+define double @lgamma_f64(double %a) #0 {
+; CHECK-LABEL: lgamma_f64
+; CHECK: __xl_lgamma
+; CHECK: blr
+entry:
+  %call = tail call afn double @lgamma(double %a)
+  ret double %call
+}
+
+define double @log_f64(double %a) #0 {
+; CHECK-LABEL: log_f64
+; CHECK: __xl_log
+; CHECK: blr
+entry:
+  %call = tail call afn double @log(double %a)
+  ret double %call
+}
+
+define double @log10_f64(double %a) #0 {
+; CHECK-LABEL: log10_f64
+; CHECK: __xl_log10
+; CHECK: blr
+entry:
+  %call = tail call afn double @log10(double %a)
+  ret double %call
+}
+
+define double @log1p_f64(double %a) #0 {
+; CHECK-LABEL: log1p_f64
+; CHECK: __xl_log1p
+; CHECK: blr
+entry:
+  %call = tail call afn double @log1p(double %a)
+  ret double %call
+}
+
+define double @pow_f64(double %a, double %b) #0 {
+; CHECK-LABEL: pow_f64
+; CHECK: __xl_pow
+; CHECK: blr
+entry:
+  %call = tail call afn double @pow(double %a, double %b)
+  ret double %call
+}
+
+define double @rsqrt_f64(double %a) #0 {
+; CHECK-LABEL: rsqrt_f64
+; CHECK: __xl_rsqrt
+; CHECK: blr
+entry:
+  %call = tail call afn double @rsqrt(double %a)
+  ret double %call
+}
+
+define double @sin_f64(double %a) #0 {
+; CHECK-LABEL: sin_f64
+; CHECK: __xl_sin
+; CHECK: blr
+entry:
+  %call = tail call afn double @sin(double %a)
+  ret double %call
+}
+
+define double @sincos_f64(double %a) #0 {
+; CHECK-LABEL: sincos_f64
+; CHECK-NOT: __xl_sincos
+; CHECK: blr
+entry:
+  %call = tail call afn double @sincos(double %a)
+  ret double %call
+}
+
+define double @sinh_f64(double %a) #0 {
+; CHECK-LABEL: sinh_f64
+; CHECK: __xl_sinh
+; CHECK: blr
+entry:
+  %call = tail call afn double @sinh(double %a)
+  ret double %call
+}
+
+define double @sqrt_f64(double %a) #0 {
+; CHECK-LABEL: sqrt_f64
+; CHECK: __xl_sqrt
+; CHECK: blr
+entry:
+  %call = tail call afn double @sqrt(double %a)
+  ret double %call
+}
+
+define double @tan_f64(double %a) #0 {
+; CHECK-LABEL: tan_f64
+; CHECK: __xl_tan
+; CHECK: blr
+entry:
+  %call = tail call afn double @tan(double %a)
+  ret double %call
+}
+
+define double @tanh_f64(double %a) #0 {
+; CHECK-LABEL: tanh_f64
+; CHECK: __xl_tanh
+; CHECK: blr
+entry:
+  %call = tail call afn double @tanh(double %a)
+  ret double %call
+}
+
+define float @__acosf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __acosf_finite_f32
+; CHECK: __xl_acosf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__acosf_finite(float %a)
+  ret float %call
+}
+
+define float @__acoshf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __acoshf_finite_f32
+; CHECK: __xl_acoshf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__acoshf_finite(float %a)
+  ret float %call
+}
+
+define float @__asinf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __asinf_finite_f32
+; CHECK: __xl_asinf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__asinf_finite(float %a)
+  ret float %call
+}
+
+define float @__atan2f_finite_f32(float %a, float %b) #0 {
+; CHECK-LABEL: __atan2f_finite_f32
+; CHECK: __xl_atan2f
+; CHECK: blr
+entry:
+  %call = tail call afn float @__atan2f_finite(float %a, float %b)
+  ret float %call
+}
+
+define float @__atanhf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __atanhf_finite_f32
+; CHECK: __xl_atanhf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__atanhf_finite(float %a)
+  ret float %call
+}
+
+define float @__coshf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __coshf_finite_f32
+; CHECK: __xl_coshf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__coshf_finite(float %a)
+  ret float %call
+}
+define float @__expf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __expf_finite_f32
+; CHECK: __xl_expf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__expf_finite(float %a)
+  ret float %call
+}
+define float @__logf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __logf_finite_f32
+; CHECK: __xl_logf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__logf_finite(float %a)
+  ret float %call
+}
+define float @__log10f_finite_f32(float %a) #0 {
+; CHECK-LABEL: __log10f_finite_f32
+; CHECK: __xl_log10f
+; CHECK: blr
+entry:
+  %call = tail call afn float @__log10f_finite(float %a)
+  ret float %call
+}
+define float @__powf_finite_f32(float %a, float %b) #0 {
+; CHECK-LABEL: __powf_finite_f32
+; CHECK: __xl_powf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__powf_finite(float %a, float %b)
+  ret float %call
+}
+define float @__sinhf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __sinhf_finite_f32
+; CHECK: __xl_sinhf
+; CHECK: blr
+entry:
+  %call = tail call afn float @__sinhf_finite(float %a)
+  ret float %call
+}
+
+define double @__acos_finite_f64(double %a) #0 {
+; CHECK-LABEL: __acos_finite_f64
+; CHECK: __xl_acos
+; CHECK: blr
+entry:
+  %call = tail call afn double @__acos_finite(double %a)
+  ret double %call
+}
+
+define double @__acosh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __acosh_finite_f64
+; CHECK: __xl_acosh
+; CHECK: blr
+entry:
+  %call = tail call afn double @__acosh_finite(double %a)
+  ret double %call
+}
+
+define double @__asin_finite_f64(double %a) #0 {
+; CHECK-LABEL: __asin_finite_f64
+; CHECK: __xl_asin
+; CHECK: blr
+entry:
+  %call = tail call afn double @__asin_finite(double %a)
+  ret double %call
+}
+
+define double @__atan2_finite_f64(double %a, double %b) #0 {
+; CHECK-LABEL: __atan2_finite_f64
+; CHECK: __xl_atan2
+; CHECK: blr
+entry:
+  %call = tail call afn double @__atan2_finite(double %a, double %b)
+  ret double %call
+}
+
+define double @__atanh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __atanh_finite_f64
+; CHECK: __xl_atanh
+; CHECK: blr
+entry:
+  %call = tail call afn double @__atanh_finite(double %a)
+  ret double %call
+}
+
+define double @__cosh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __cosh_finite_f64
+; CHECK: __xl_cosh
+; CHECK: blr
+entry:
+  %call = tail call afn double @__cosh_finite(double %a)
+  ret double %call
+}
+
+define double @__exp_finite_f64(double %a) #0 {
+; CHECK-LABEL: __exp_finite_f64
+; CHECK: __xl_exp
+; CHECK: blr
+entry:
+  %call = tail call afn double @__exp_finite(double %a)
+  ret double %call
+}
+
+define double @__log_finite_f64(double %a) #0 {
+; CHECK-LABEL: __log_finite_f64
+; CHECK: __xl_log
+; CHECK: blr
+entry:
+  %call = tail call afn double @__log_finite(double %a)
+  ret double %call
+}
+
+define double @__log10_finite_f64(double %a) #0 {
+; CHECK-LABEL: __log10_finite_f64
+; CHECK: __xl_log10
+; CHECK: blr
+entry:
+  %call = tail call afn double @__log10_finite(double %a)
+  ret double %call
+}
+
+define double @__pow_finite_f64(double %a, double %b) #0 {
+; CHECK-LABEL: __pow_finite_f64
+; CHECK: __xl_pow
+; CHECK: blr
+entry:
+  %call = tail call afn double @__pow_finite(double %a, double %b)
+  ret double %call
+}
+
+define double @__sinh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __sinh_finite_f64
+; CHECK: __xl_sinh
+; CHECK: blr
+entry:
+  %call = tail call afn double @__sinh_finite(double %a)
+  ret double %call
+}
+
+attributes #0 = { "approx-func-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/lower-scalar-mass-fast.ll b/llvm/test/CodeGen/PowerPC/lower-scalar-mass-fast.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/lower-scalar-mass-fast.ll
@@ -0,0 +1,1584 @@
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck %s
+
+declare float @acosf (float);
+declare float @acoshf (float);
+declare float @asinf (float);
+declare float @asinhf (float);
+declare float @atan2f (float, float);
+declare float @atanf (float);
+declare float @atanhf (float);
+declare float @cbrtf (float);
+declare float @copysignf (float, float);
+declare float @cosf (float);
+declare float @coshf (float);
+declare float @erfcf (float);
+declare float @erff (float);
+declare float @expf (float);
+declare float @expm1f (float);
+declare float @hypotf (float, float);
+declare float @lgammaf (float);
+declare float @log10f (float);
+declare float @log1pf (float);
+declare float @logf (float);
+declare float @powf (float, float);
+declare float @rintf (float);
+declare float @sinf (float);
+declare float @sinhf (float);
+declare float @tanf (float);
+declare float @tanhf (float);
+declare double @acos (double);
+declare double @acosh (double);
+declare double @anint (double);
+declare double @asin (double);
+declare double @asinh (double);
+declare double @atan (double);
+declare double @atan2 (double, double);
+declare double @atanh (double);
+declare double @cbrt (double);
+declare double @copysign (double, double);
+declare double @cos (double);
+declare double @cosh (double);
+declare double @cosisin (double);
+declare double @dnint (double);
+declare double @erf (double);
+declare double @erfc (double);
+declare double @exp (double);
+declare double @expm1 (double);
+declare double @hypot (double, double);
+declare double @lgamma (double);
+declare double @log (double);
+declare double @log10 (double);
+declare double @log1p (double);
+declare double @pow (double, double);
+declare double @rsqrt (double);
+declare double @sin (double);
+declare double @sincos (double);
+declare double @sinh (double);
+declare double @sqrt (double);
+declare double @tan (double);
+declare double @tanh (double);
+declare float @__acosf_finite (float);
+declare float @__acoshf_finite (float);
+declare float @__asinf_finite (float);
+declare float @__atan2f_finite (float, float);
+declare float @__atanhf_finite (float);
+declare float @__coshf_finite (float);
+declare float @__expf_finite (float);
+declare float @__logf_finite (float);
+declare float @__log10f_finite (float);
+declare float @__powf_finite (float, float);
+declare float @__sinhf_finite (float);
+declare double @__acos_finite (double);
+declare double @__acosh_finite (double);
+declare double @__asin_finite (double);
+declare double @__atan2_finite (double, double);
+declare double @__atanh_finite (double);
+declare double @__cosh_finite (double);
+declare double @__exp_finite (double);
+declare double @__log_finite (double);
+declare double @__log10_finite (double);
+declare double @__pow_finite (double, double);
+declare double @__sinh_finite (double);
+
+define float @acosf_f32(float %a) #0 {
+; CHECK-LABEL: acosf_f32
+; CHECK: __xl_acosf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @acosf(float %a)
+  ret float %call
+}
+
+define float @acoshf_f32(float %a) #0 {
+; CHECK-LABEL: acoshf_f32
+; CHECK: __xl_acoshf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @acoshf(float %a)
+  ret float %call
+}
+
+define float @asinf_f32(float %a) #0 {
+; CHECK-LABEL: asinf_f32
+; CHECK: __xl_asinf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @asinf(float %a)
+  ret float %call
+}
+
+define float @asinhf_f32(float %a) #0 {
+; CHECK-LABEL: asinhf_f32
+; CHECK: __xl_asinhf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @asinhf(float %a)
+  ret float %call
+}
+
+define float @atan2f_f32(float %a, float %b) #0 {
+; CHECK-LABEL: atan2f_f32
+; CHECK: __xl_atan2f_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @atan2f(float %a, float %b)
+  ret float %call
+}
+
+define float @atanf_f32(float %a) #0 {
+; CHECK-LABEL: atanf_f32
+; CHECK: __xl_atanf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @atanf(float %a)
+  ret float %call
+}
+
+define float @atanhf_f32(float %a) #0 {
+; CHECK-LABEL: atanhf_f32
+; CHECK: __xl_atanhf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @atanhf(float %a)
+  ret float %call
+}
+
+define float @cbrtf_f32(float %a) #0 {
+; CHECK-LABEL: cbrtf_f32
+; CHECK: __xl_cbrtf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @cbrtf(float %a)
+  ret float %call
+}
+
+define float @copysignf_f32(float %a, float %b) #0 {
+; CHECK-LABEL: copysignf_f32
+; CHECK: copysignf
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @copysignf(float %a, float %b)
+  ret float %call
+}
+
+define float @cosf_f32(float %a) #0 {
+; CHECK-LABEL: cosf_f32
+; CHECK: __xl_cosf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @cosf(float %a)
+  ret float %call
+}
+
+define float @coshf_f32(float %a) #0 {
+; CHECK-LABEL: coshf_f32
+; CHECK: __xl_coshf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @coshf(float %a)
+  ret float %call
+}
+
+define float @erfcf_f32(float %a) #0 {
+; CHECK-LABEL: erfcf_f32
+; CHECK: __xl_erfcf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @erfcf(float %a)
+  ret float %call
+}
+
+define float @erff_f32(float %a) #0 {
+; CHECK-LABEL: erff_f32
+; CHECK: __xl_erff_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @erff(float %a)
+  ret float %call
+}
+
+define float @expf_f32(float %a) #0 {
+; CHECK-LABEL: expf_f32
+; CHECK: __xl_expf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @expf(float %a)
+  ret float %call
+}
+
+define float @expm1f_f32(float %a) #0 {
+; CHECK-LABEL: expm1f_f32
+; CHECK: __xl_expm1f_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @expm1f(float %a)
+  ret float %call
+}
+
+define float @hypotf_f32(float %a, float %b) #0 {
+; CHECK-LABEL: hypotf_f32
+; CHECK: __xl_hypotf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @hypotf(float %a, float %b)
+  ret float %call
+}
+
+define float @lgammaf_f32(float %a) #0 {
+; CHECK-LABEL: lgammaf_f32
+; CHECK: __xl_lgammaf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @lgammaf(float %a)
+  ret float %call
+}
+
+define float @log10f_f32(float %a) #0 {
+; CHECK-LABEL: log10f_f32
+; CHECK: __xl_log10f_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @log10f(float %a)
+  ret float %call
+}
+
+define float @log1pf_f32(float %a) #0 {
+; CHECK-LABEL: log1pf_f32
+; CHECK: __xl_log1pf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @log1pf(float %a)
+  ret float %call
+}
+
+define float @logf_f32(float %a) #0 {
+; CHECK-LABEL: logf_f32
+; CHECK: __xl_logf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @logf(float %a)
+  ret float %call
+}
+
+define float @powf_f32(float %a, float %b) #0 {
+; CHECK-LABEL: powf_f32
+; CHECK: __xl_powf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @powf(float %a, float %b)
+  ret float %call
+}
+
+define float @rintf_f32(float %a) #0 {
+; CHECK-LABEL: rintf_f32
+; CHECK-NOT: __xl_rintf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @rintf(float %a)
+  ret float %call
+}
+
+define float @sinf_f32(float %a) #0 {
+; CHECK-LABEL: sinf_f32
+; CHECK: __xl_sinf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @sinf(float %a)
+  ret float %call
+}
+
+define float @sinhf_f32(float %a) #0 {
+; CHECK-LABEL: sinhf_f32
+; CHECK: __xl_sinhf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @sinhf(float %a)
+  ret float %call
+}
+
+define float @tanf_f32(float %a) #0 {
+; CHECK-LABEL: tanf_f32
+; CHECK: __xl_tanf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @tanf(float %a)
+  ret float %call
+}
+
+define float @tanhf_f32(float %a) #0 {
+; CHECK-LABEL: tanhf_f32
+; CHECK: __xl_tanhf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @tanhf(float %a)
+  ret float %call
+}
+
+define double @acos_f64(double %a) #0 {
+; CHECK-LABEL: acos_f64
+; CHECK: __xl_acos_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @acos(double %a)
+  ret double %call
+}
+
+define double @acosh_f64(double %a) #0 {
+; CHECK-LABEL: acosh_f64
+; CHECK: __xl_acosh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @acosh(double %a)
+  ret double %call
+}
+
+define double @anint_f64(double %a) #0 {
+; CHECK-LABEL: anint_f64
+; CHECK-NOT: __xl_anint_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @anint(double %a)
+  ret double %call
+}
+
+define double @asin_f64(double %a) #0 {
+; CHECK-LABEL: asin_f64
+; CHECK: __xl_asin_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @asin(double %a)
+  ret double %call
+}
+
+define double @asinh_f64(double %a) #0 {
+; CHECK-LABEL: asinh_f64
+; CHECK: __xl_asinh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @asinh(double %a)
+  ret double %call
+}
+
+define double @atan_f64(double %a) #0 {
+; CHECK-LABEL: atan_f64
+; CHECK: __xl_atan_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @atan(double %a)
+  ret double %call
+}
+
+define double @atan2_f64(double %a, double %b) #0 {
+; CHECK-LABEL: atan2_f64
+; CHECK: __xl_atan2_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @atan2(double %a, double %b)
+  ret double %call
+}
+
+define double @atanh_f64(double %a) #0 {
+; CHECK-LABEL: atanh_f64
+; CHECK: __xl_atanh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @atanh(double %a)
+  ret double %call
+}
+
+define double @cbrt_f64(double %a) #0 {
+; CHECK-LABEL: cbrt_f64
+; CHECK: __xl_cbrt_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @cbrt(double %a)
+  ret double %call
+}
+
+define double @copysign_f64(double %a, double %b) #0 {
+; CHECK-LABEL: copysign_f64
+; CHECK: copysign
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @copysign(double %a, double %b)
+  ret double %call
+}
+
+define double @cos_f64(double %a) #0 {
+; CHECK-LABEL: cos_f64
+; CHECK: __xl_cos_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @cos(double %a)
+  ret double %call
+}
+
+define double @cosh_f64(double %a) #0 {
+; CHECK-LABEL: cosh_f64
+; CHECK: __xl_cosh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @cosh(double %a)
+  ret double %call
+}
+
+define double @cosisin_f64(double %a) #0 {
+; CHECK-LABEL: cosisin_f64
+; CHECK-NOT: __xl_cosisin_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @cosisin(double %a)
+  ret double %call
+}
+
+define double @dnint_f64(double %a) #0 {
+; CHECK-LABEL: dnint_f64
+; CHECK-NOT: __xl_dnint_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @dnint(double %a)
+  ret double %call
+}
+
+define double @erf_f64(double %a) #0 {
+; CHECK-LABEL: erf_f64
+; CHECK: __xl_erf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @erf(double %a)
+  ret double %call
+}
+
+define double @erfc_f64(double %a) #0 {
+; CHECK-LABEL: erfc_f64
+; CHECK: __xl_erfc_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @erfc(double %a)
+  ret double %call
+}
+
+define double @exp_f64(double %a) #0 {
+; CHECK-LABEL: exp_f64
+; CHECK: __xl_exp_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @exp(double %a)
+  ret double %call
+}
+
+define double @expm1_f64(double %a) #0 {
+; CHECK-LABEL: expm1_f64
+; CHECK: __xl_expm1_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @expm1(double %a)
+  ret double %call
+}
+
+define double @hypot_f64(double %a, double %b) #0 {
+; CHECK-LABEL: hypot_f64
+; CHECK: __xl_hypot_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @hypot(double %a, double %b)
+  ret double %call
+}
+
+define double @lgamma_f64(double %a) #0 {
+; CHECK-LABEL: lgamma_f64
+; CHECK: __xl_lgamma_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @lgamma(double %a)
+  ret double %call
+}
+
+define double @log_f64(double %a) #0 {
+; CHECK-LABEL: log_f64
+; CHECK: __xl_log_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @log(double %a)
+  ret double %call
+}
+
+define double @log10_f64(double %a) #0 {
+; CHECK-LABEL: log10_f64
+; CHECK: __xl_log10_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @log10(double %a)
+  ret double %call
+}
+
+define double @log1p_f64(double %a) #0 {
+; CHECK-LABEL: log1p_f64
+; CHECK: __xl_log1p_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @log1p(double %a)
+  ret double %call
+}
+
+define double @pow_f64(double %a, double %b) #0 {
+; CHECK-LABEL: pow_f64
+; CHECK: __xl_pow_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @pow(double %a, double %b)
+  ret double %call
+}
+
+define double @rsqrt_f64(double %a) #0 {
+; CHECK-LABEL: rsqrt_f64
+; CHECK: __xl_rsqrt_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @rsqrt(double %a)
+  ret double %call
+}
+
+define double @sin_f64(double %a) #0 {
+; CHECK-LABEL: sin_f64
+; CHECK: __xl_sin_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @sin(double %a)
+  ret double %call
+}
+
+define double @sincos_f64(double %a) #0 {
+; CHECK-LABEL: sincos_f64
+; CHECK-NOT: __xl_sincos_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @sincos(double %a)
+  ret double %call
+}
+
+define double @sinh_f64(double %a) #0 {
+; CHECK-LABEL: sinh_f64
+; CHECK: __xl_sinh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @sinh(double %a)
+  ret double %call
+}
+
+define double @sqrt_f64(double %a) #0 {
+; CHECK-LABEL: sqrt_f64
+; CHECK: __xl_sqrt_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @sqrt(double %a)
+  ret double %call
+}
+
+define double @tan_f64(double %a) #0 {
+; CHECK-LABEL: tan_f64
+; CHECK: __xl_tan_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @tan(double %a)
+  ret double %call
+}
+
+define double @tanh_f64(double %a) #0 {
+; CHECK-LABEL: tanh_f64
+; CHECK: __xl_tanh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @tanh(double %a)
+  ret double %call
+}
+
+define float @__acosf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __acosf_finite_f32
+; CHECK: __xl_acosf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__acosf_finite(float %a)
+  ret float %call
+}
+
+define float @__acoshf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __acoshf_finite_f32
+; CHECK: __xl_acoshf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__acoshf_finite(float %a)
+  ret float %call
+}
+
+define float @__asinf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __asinf_finite_f32
+; CHECK: __xl_asinf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__asinf_finite(float %a)
+  ret float %call
+}
+
+define float @__atan2f_finite_f32(float %a, float %b) #0 {
+; CHECK-LABEL: __atan2f_finite_f32
+; CHECK: __xl_atan2f_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__atan2f_finite(float %a, float %b)
+  ret float %call
+}
+
+define float @__atanhf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __atanhf_finite_f32
+; CHECK: __xl_atanhf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__atanhf_finite(float %a)
+  ret float %call
+}
+
+define float @__coshf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __coshf_finite_f32
+; CHECK: __xl_coshf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__coshf_finite(float %a)
+  ret float %call
+}
+
+define float @__expf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __expf_finite_f32
+; CHECK: __xl_expf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__expf_finite(float %a)
+  ret float %call
+}
+
+define float @__logf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __logf_finite_f32
+; CHECK: __xl_logf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__logf_finite(float %a)
+  ret float %call
+}
+
+define float @__log10f_finite_f32(float %a) #0 {
+; CHECK-LABEL: __log10f_finite_f32
+; CHECK: __xl_log10f_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__log10f_finite(float %a)
+  ret float %call
+}
+
+define float @__powf_finite_f32(float %a, float %b) #0 {
+; CHECK-LABEL: __powf_finite_f32
+; CHECK: __xl_powf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__powf_finite(float %a, float %b)
+  ret float %call
+}
+
+define float @__sinhf_finite_f32(float %a) #0 {
+; CHECK-LABEL: __sinhf_finite_f32
+; CHECK: __xl_sinhf_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__sinhf_finite(float %a)
+  ret float %call
+}
+
+define double @__acos_finite_f64(double %a) #0 {
+; CHECK-LABEL: __acos_finite_f64
+; CHECK: __xl_acos_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__acos_finite(double %a)
+  ret double %call
+}
+
+define double @__acosh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __acosh_finite_f64
+; CHECK: __xl_acosh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__acosh_finite(double %a)
+  ret double %call
+}
+
+define double @__asin_finite_f64(double %a) #0 {
+; CHECK-LABEL: __asin_finite_f64
+; CHECK: __xl_asin_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__asin_finite(double %a)
+  ret double %call
+}
+
+define double @__atan2_finite_f64(double %a, double %b) #0 {
+; CHECK-LABEL: __atan2_finite_f64
+; CHECK: __xl_atan2_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__atan2_finite(double %a, double %b)
+  ret double %call
+}
+
+define double @__atanh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __atanh_finite_f64
+; CHECK: __xl_atanh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__atanh_finite(double %a)
+  ret double %call
+}
+
+define double @__cosh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __cosh_finite_f64
+; CHECK: __xl_cosh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__cosh_finite(double %a)
+  ret double %call
+}
+
+define double @__exp_finite_f64(double %a) #0 {
+; CHECK-LABEL: __exp_finite_f64
+; CHECK: __xl_exp_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__exp_finite(double %a)
+  ret double %call
+}
+
+define double @__log_finite_f64(double %a) #0 {
+; CHECK-LABEL: __log_finite_f64
+; CHECK: __xl_log_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__log_finite(double %a)
+  ret double %call
+}
+
+define double @__log10_finite_f64(double %a) #0 {
+; CHECK-LABEL: __log10_finite_f64
+; CHECK: __xl_log10_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__log10_finite(double %a)
+  ret double %call
+}
+
+define double @__pow_finite_f64(double %a, double %b) #0 {
+; CHECK-LABEL: __pow_finite_f64
+; CHECK: __xl_pow_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__pow_finite(double %a, double %b)
+  ret double %call
+}
+
+define double @__sinh_finite_f64(double %a) #0 {
+; CHECK-LABEL: __sinh_finite_f64
+; CHECK: __xl_sinh_finite
+; CHECK: blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__sinh_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @acosf_f32_nofast(float %a) {
+; CHECK-LABEL: acosf_f32_nofast
+; CHECK-NOT: __xl_acosf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @acosf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @acoshf_f32_nofast(float %a) {
+; CHECK-LABEL: acoshf_f32_nofast
+; CHECK-NOT: __xl_acoshf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @acoshf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @asinf_f32_nofast(float %a) {
+; CHECK-LABEL: asinf_f32_nofast
+; CHECK-NOT: __xl_asinf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @asinf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @asinhf_f32_nofast(float %a) {
+; CHECK-LABEL: asinhf_f32_nofast
+; CHECK-NOT: __xl_asinhf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @asinhf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @atan2f_f32_nofast(float %a, float %b) {
+; CHECK-LABEL: atan2f_f32_nofast
+; CHECK-NOT: __xl_atan2f_finite
+; CHECK: blr
+entry:
+  %call = tail call float @atan2f(float %a, float %b)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @atanf_f32_nofast(float %a) {
+; CHECK-LABEL: atanf_f32_nofast
+; CHECK-NOT: __xl_atanf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @atanf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @atanhf_f32_nofast(float %a) {
+; CHECK-LABEL: atanhf_f32_nofast
+; CHECK-NOT: __xl_atanhf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @atanhf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @cbrtf_f32_nofast(float %a) {
+; CHECK-LABEL: cbrtf_f32_nofast
+; CHECK-NOT: __xl_cbrtf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @cbrtf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @copysignf_f32_nofast(float %a, float %b) {
+; CHECK-LABEL: copysignf_f32_nofast
+; CHECK-NOT: __xl_copysignf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @copysignf(float %a, float %b)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @cosf_f32_nofast(float %a) {
+; CHECK-LABEL: cosf_f32_nofast
+; CHECK-NOT: __xl_cosf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @cosf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @coshf_f32_nofast(float %a) {
+; CHECK-LABEL: coshf_f32_nofast
+; CHECK-NOT: __xl_coshf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @coshf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @erfcf_f32_nofast(float %a) {
+; CHECK-LABEL: erfcf_f32_nofast
+; CHECK-NOT: __xl_erfcf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @erfcf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @erff_f32_nofast(float %a) {
+; CHECK-LABEL: erff_f32_nofast
+; CHECK-NOT: __xl_erff_finite
+; CHECK: blr
+entry:
+  %call = tail call float @erff(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @expf_f32_nofast(float %a) {
+; CHECK-LABEL: expf_f32_nofast
+; CHECK-NOT: __xl_expf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @expf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @expm1f_f32_nofast(float %a) {
+; CHECK-LABEL: expm1f_f32_nofast
+; CHECK-NOT: __xl_expm1f_finite
+; CHECK: blr
+entry:
+  %call = tail call float @expm1f(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @hypotf_f32_nofast(float %a, float %b) {
+; CHECK-LABEL: hypotf_f32_nofast
+; CHECK-NOT: __xl_hypotf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @hypotf(float %a, float %b)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @lgammaf_f32_nofast(float %a) {
+; CHECK-LABEL: lgammaf_f32_nofast
+; CHECK-NOT: __xl_lgammaf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @lgammaf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @log10f_f32_nofast(float %a) {
+; CHECK-LABEL: log10f_f32_nofast
+; CHECK-NOT: __xl_log10f_finite
+; CHECK: blr
+entry:
+  %call = tail call float @log10f(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @log1pf_f32_nofast(float %a) {
+; CHECK-LABEL: log1pf_f32_nofast
+; CHECK-NOT: __xl_log1pf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @log1pf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @logf_f32_nofast(float %a) {
+; CHECK-LABEL: logf_f32_nofast
+; CHECK-NOT: __xl_logf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @logf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @powf_f32_nofast(float %a, float %b) {
+; CHECK-LABEL: powf_f32_nofast
+; CHECK-NOT: __xl_powf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @powf(float %a, float %b)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @rintf_f32_nofast(float %a) {
+; CHECK-LABEL: rintf_f32_nofast
+; CHECK-NOT: __xl_rintf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @rintf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @sinf_f32_nofast(float %a) {
+; CHECK-LABEL: sinf_f32_nofast
+; CHECK-NOT: __xl_sinf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @sinf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @sinhf_f32_nofast(float %a) {
+; CHECK-LABEL: sinhf_f32_nofast
+; CHECK-NOT: __xl_sinhf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @sinhf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @tanf_f32_nofast(float %a) {
+; CHECK-LABEL: tanf_f32_nofast
+; CHECK-NOT: __xl_tanf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @tanf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @tanhf_f32_nofast(float %a) {
+; CHECK-LABEL: tanhf_f32_nofast
+; CHECK-NOT: __xl_tanhf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @tanhf(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @acos_f64_nofast(double %a) {
+; CHECK-LABEL: acos_f64_nofast
+; CHECK-NOT: __xl_acos_finite
+; CHECK: blr
+entry:
+  %call = tail call double @acos(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @acosh_f64_nofast(double %a) {
+; CHECK-LABEL: acosh_f64_nofast
+; CHECK-NOT: __xl_acosh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @acosh(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @anint_f64_nofast(double %a) {
+; CHECK-LABEL: anint_f64_nofast
+; CHECK-NOT: __xl_anint_finite
+; CHECK: blr
+entry:
+  %call = tail call double @anint(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @asin_f64_nofast(double %a) {
+; CHECK-LABEL: asin_f64_nofast
+; CHECK-NOT: __xl_asin_finite
+; CHECK: blr
+entry:
+  %call = tail call double @asin(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @asinh_f64_nofast(double %a) {
+; CHECK-LABEL: asinh_f64_nofast
+; CHECK-NOT: __xl_asinh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @asinh(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @atan_f64_nofast(double %a) {
+; CHECK-LABEL: atan_f64_nofast
+; CHECK-NOT: __xl_atan_finite
+; CHECK: blr
+entry:
+  %call = tail call double @atan(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @atan2_f64_nofast(double %a, double %b) {
+; CHECK-LABEL: atan2_f64_nofast
+; CHECK-NOT: __xl_atan2_finite
+; CHECK: blr
+entry:
+  %call = tail call double @atan2(double %a, double %b)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @atanh_f64_nofast(double %a) {
+; CHECK-LABEL: atanh_f64_nofast
+; CHECK-NOT: __xl_atanh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @atanh(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @cbrt_f64_nofast(double %a) {
+; CHECK-LABEL: cbrt_f64_nofast
+; CHECK-NOT: __xl_cbrt_finite
+; CHECK: blr
+entry:
+  %call = tail call double @cbrt(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @copysign_f64_nofast(double %a, double %b) {
+; CHECK-LABEL: copysign_f64_nofast
+; CHECK-NOT: __xl_copysign_finite
+; CHECK: blr
+entry:
+  %call = tail call double @copysign(double %a, double %b)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @cos_f64_nofast(double %a) {
+; CHECK-LABEL: cos_f64_nofast
+; CHECK-NOT: __xl_cos_finite
+; CHECK: blr
+entry:
+  %call = tail call double @cos(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @cosh_f64_nofast(double %a) {
+; CHECK-LABEL: cosh_f64_nofast
+; CHECK-NOT: __xl_cosh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @cosh(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @cosisin_f64_nofast(double %a) {
+; CHECK-LABEL: cosisin_f64_nofast
+; CHECK-NOT: __xl_cosisin_finite
+; CHECK: blr
+entry:
+  %call = tail call double @cosisin(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @dnint_f64_nofast(double %a) {
+; CHECK-LABEL: dnint_f64_nofast
+; CHECK-NOT: __xl_dnint_finite
+; CHECK: blr
+entry:
+  %call = tail call double @dnint(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @erf_f64_nofast(double %a) {
+; CHECK-LABEL: erf_f64_nofast
+; CHECK-NOT: __xl_erf_finite
+; CHECK: blr
+entry:
+  %call = tail call double @erf(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @erfc_f64_nofast(double %a) {
+; CHECK-LABEL: erfc_f64_nofast
+; CHECK-NOT: __xl_erfc_finite
+; CHECK: blr
+entry:
+  %call = tail call double @erfc(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @exp_f64_nofast(double %a) {
+; CHECK-LABEL: exp_f64_nofast
+; CHECK-NOT: __xl_exp_finite
+; CHECK: blr
+entry:
+  %call = tail call double @exp(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @expm1_f64_nofast(double %a) {
+; CHECK-LABEL: expm1_f64_nofast
+; CHECK-NOT: __xl_expm1_finite
+; CHECK: blr
+entry:
+  %call = tail call double @expm1(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @hypot_f64_nofast(double %a, double %b) {
+; CHECK-LABEL: hypot_f64_nofast
+; CHECK-NOT: __xl_hypot_finite
+; CHECK: blr
+entry:
+  %call = tail call double @hypot(double %a, double %b)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @lgamma_f64_nofast(double %a) {
+; CHECK-LABEL: lgamma_f64_nofast
+; CHECK-NOT: __xl_lgamma_finite
+; CHECK: blr
+entry:
+  %call = tail call double @lgamma(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @log_f64_nofast(double %a) {
+; CHECK-LABEL: log_f64_nofast
+; CHECK-NOT: __xl_log_finite
+; CHECK: blr
+entry:
+  %call = tail call double @log(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @log10_f64_nofast(double %a) {
+; CHECK-LABEL: log10_f64_nofast
+; CHECK-NOT: __xl_log10_finite
+; CHECK: blr
+entry:
+  %call = tail call double @log10(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @log1p_f64_nofast(double %a) {
+; CHECK-LABEL: log1p_f64_nofast
+; CHECK-NOT: __xl_log1p_finite
+; CHECK: blr
+entry:
+  %call = tail call double @log1p(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @pow_f64_nofast(double %a, double %b) {
+; CHECK-LABEL: pow_f64_nofast
+; CHECK-NOT: __xl_pow_finite
+; CHECK: blr
+entry:
+  %call = tail call double @pow(double %a, double %b)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @rsqrt_f64_nofast(double %a) {
+; CHECK-LABEL: rsqrt_f64_nofast
+; CHECK-NOT: __xl_rsqrt_finite
+; CHECK: blr
+entry:
+  %call = tail call double @rsqrt(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @sin_f64_nofast(double %a) {
+; CHECK-LABEL: sin_f64_nofast
+; CHECK-NOT: __xl_sin_finite
+; CHECK: blr
+entry:
+  %call = tail call double @sin(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @sincos_f64_nofast(double %a) {
+; CHECK-LABEL: sincos_f64_nofast
+; CHECK-NOT: __xl_sincos_finite
+; CHECK: blr
+entry:
+  %call = tail call double @sincos(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @sinh_f64_nofast(double %a) {
+; CHECK-LABEL: sinh_f64_nofast
+; CHECK-NOT: __xl_sinh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @sinh(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @sqrt_f64_nofast(double %a) {
+; CHECK-LABEL: sqrt_f64_nofast
+; CHECK-NOT: __xl_sqrt_finite
+; CHECK: blr
+entry:
+  %call = tail call double @sqrt(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @tan_f64_nofast(double %a) {
+; CHECK-LABEL: tan_f64_nofast
+; CHECK-NOT: __xl_tan_finite
+; CHECK: blr
+entry:
+  %call = tail call double @tan(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @tanh_f64_nofast(double %a) {
+; CHECK-LABEL: tanh_f64_nofast
+; CHECK-NOT: __xl_tanh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @tanh(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__acosf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __acosf_finite_f32_nofast
+; CHECK-NOT: __xl_acosf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__acosf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__acoshf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __acoshf_finite_f32_nofast
+; CHECK-NOT: __xl_acoshf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__acoshf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__asinf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __asinf_finite_f32_nofast
+; CHECK-NOT: __xl_asinf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__asinf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__atan2f_finite_f32_nofast(float %a, float %b) #0 {
+; CHECK-LABEL: __atan2f_finite_f32_nofast
+; CHECK-NOT: __xl_atan2f_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__atan2f_finite(float %a, float %b)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__atanhf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __atanhf_finite_f32_nofast
+; CHECK-NOT: __xl_atanhf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__atanhf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__coshf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __coshf_finite_f32_nofast
+; CHECK-NOT: __xl_coshf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__coshf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__expf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __expf_finite_f32_nofast
+; CHECK-NOT: __xl_expf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__expf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__logf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __logf_finite_f32_nofast
+; CHECK-NOT: __xl_logf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__logf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__log10f_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __log10f_finite_f32_nofast
+; CHECK-NOT: __xl_log10f_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__log10f_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__powf_finite_f32_nofast(float %a, float %b) #0 {
+; CHECK-LABEL: __powf_finite_f32_nofast
+; CHECK-NOT: __xl_powf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__powf_finite(float %a, float %b)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define float @__sinhf_finite_f32_nofast(float %a) #0 {
+; CHECK-LABEL: __sinhf_finite_f32_nofast
+; CHECK-NOT: __xl_sinhf_finite
+; CHECK: blr
+entry:
+  %call = tail call float @__sinhf_finite(float %a)
+  ret float %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__acos_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __acos_finite_f64_nofast
+; CHECK-NOT: __xl_acos_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__acos_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__acosh_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __acosh_finite_f64_nofast
+; CHECK-NOT: __xl_acosh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__acosh_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__asin_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __asin_finite_f64_nofast
+; CHECK-NOT: __xl_asin_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__asin_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__atan2_finite_f64_nofast(double %a, double %b) #0 {
+; CHECK-LABEL: __atan2_finite_f64_nofast
+; CHECK-NOT: __xl_atan2_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__atan2_finite(double %a, double %b)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__atanh_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __atanh_finite_f64_nofast
+; CHECK-NOT: __xl_atanh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__atanh_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__cosh_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __cosh_finite_f64_nofast
+; CHECK-NOT: __xl_cosh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__cosh_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__exp_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __exp_finite_f64_nofast
+; CHECK-NOT: __xl_exp_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__exp_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__log_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __log_finite_f64_nofast
+; CHECK-NOT: __xl_log_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__log_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__log10_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __log10_finite_f64_nofast
+; CHECK-NOT: __xl_log10_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__log10_finite(double %a)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__pow_finite_f64_nofast(double %a, double %b) #0 {
+; CHECK-LABEL: __pow_finite_f64_nofast
+; CHECK-NOT: __xl_pow_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__pow_finite(double %a, double %b)
+  ret double %call
+}
+
+; Without nnan ninf afn nsz flags on the call instruction
+define double @__sinh_finite_f64_nofast(double %a) #0 {
+; CHECK-LABEL: __sinh_finite_f64_nofast
+; CHECK-NOT: __xl_sinh_finite
+; CHECK: blr
+entry:
+  %call = tail call double @__sinh_finite(double %a)
+  ret double %call
+}
+
diff --git a/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-afn.ll b/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-afn.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-afn.ll
@@ -0,0 +1,95 @@
+; RUN: llc -verify-machineinstrs -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK-LNX %s
+; RUN: llc -verify-machineinstrs -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=CHECK-AIX %s
+
+declare float @llvm.pow.f32 (float, float);
+declare double @llvm.pow.f64 (double, double);
+
+; llvm.pow.f32 with exponent 0.25 and only the afn fast-math flag: converted to the MASS entry __xl_powf
+define float @llvmintr_powf_f32_afn025(float %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_powf_f32_afn025:
+; CHECK-LNX:       bl __xl_powf
+; CHECK-LNX:       blr
+;
+; CHECK-AIX-LABEL: llvmintr_powf_f32_afn025:
+; CHECK-AIX:       bl .__xl_powf[PR]
+; CHECK-AIX:       blr
+entry:
+  %call = tail call afn float @llvm.pow.f32(float %a, float 2.500000e-01)
+  ret float %call
+}
+
+; llvm.pow.f64 with exponent 0.25 and only the afn fast-math flag: converted to the MASS entry __xl_pow
+define double @llvmintr_pow_f64_afn025(double %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_pow_f64_afn025:
+; CHECK-LNX:       bl __xl_pow
+; CHECK-LNX:       blr
+;
+; CHECK-AIX-LABEL: llvmintr_pow_f64_afn025:
+; CHECK-AIX:       bl .__xl_pow[PR]
+; CHECK-AIX:       blr
+entry:
+  %call = tail call afn double @llvm.pow.f64(double %a, double 2.500000e-01)
+  ret double %call
+}
+
+; llvm.pow.f32 with exponent 0.75 and only the afn fast-math flag: converted to the MASS entry __xl_powf
+define float @llvmintr_powf_f32_afn075(float %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_powf_f32_afn075:
+; CHECK-LNX:       bl __xl_powf
+; CHECK-LNX:       blr
+;
+; CHECK-AIX-LABEL: llvmintr_powf_f32_afn075:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX:       bl .__xl_powf[PR]
+; CHECK-AIX:       blr
+entry:
+  %call = tail call afn float @llvm.pow.f32(float %a, float 7.500000e-01)
+  ret float %call
+}
+
+; llvm.pow.f64 with exponent 0.75 and only the afn fast-math flag: converted to the MASS entry __xl_pow
+define double @llvmintr_pow_f64_afn075(double %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_pow_f64_afn075:
+; CHECK-LNX:       bl __xl_pow
+; CHECK-LNX:       blr
+;
+; CHECK-AIX-LABEL: llvmintr_pow_f64_afn075:
+; CHECK-AIX:       bl .__xl_pow[PR]
+; CHECK-AIX:       blr
+entry:
+  %call = tail call afn double @llvm.pow.f64(double %a, double 7.500000e-01)
+  ret double %call
+}
+
+; llvm.pow.f32 with exponent 0.50 and only the afn fast-math flag: converted to the MASS entry __xl_powf
+define float @llvmintr_powf_f32_afn050(float %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_powf_f32_afn050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX:       bl __xl_powf
+; CHECK-LNX:       blr
+;
+; CHECK-AIX-LABEL: llvmintr_powf_f32_afn050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX:       bl .__xl_powf[PR]
+; CHECK-AIX:       blr
+entry:
+  %call = tail call afn float @llvm.pow.f32(float %a, float 5.000000e-01)
+  ret float %call
+}
+
+; llvm.pow.f64 with exponent 0.50 and only the afn fast-math flag: converted to the MASS entry __xl_pow
+define double @llvmintr_pow_f64_afn050(double %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_pow_f64_afn050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX:       bl __xl_pow
+; CHECK-LNX:       blr
+;
+; CHECK-AIX-LABEL: llvmintr_pow_f64_afn050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX:       bl .__xl_pow[PR]
+; CHECK-AIX:       blr
+entry:
+  %call = tail call afn double @llvm.pow.f64(double %a, double 5.000000e-01)
+  ret double %call
+}
+attributes #1 = { "approx-func-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll b/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK-LNX %s
+; RUN: llc -verify-machineinstrs -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=CHECK-AIX %s
+
+declare float @llvm.pow.f32 (float, float);
+declare double @llvm.pow.f64 (double, double);
+
+; llvm.pow.f32 with exponent 0.25 and nnan ninf afn nsz flags: expanded inline on Linux, converted to __xl_powf_finite on AIX
+define float @llvmintr_powf_f32_fast025(float %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_powf_f32_fast025:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    xsrsqrtesp 0, 1
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 3, .LCPI0_0@toc@l(3)
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI0_1@toc@ha
+; CHECK-LNX-NEXT:    lfs 4, .LCPI0_1@toc@l(3)
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI0_2@toc@ha
+; CHECK-LNX-NEXT:    lfs 5, .LCPI0_2@toc@l(3)
+; CHECK-LNX-NEXT:    xsmulsp 2, 1, 0
+; CHECK-LNX-NEXT:    xsabsdp 1, 1
+; CHECK-LNX-NEXT:    xsmulsp 0, 2, 0
+; CHECK-LNX-NEXT:    xsmulsp 2, 2, 3
+; CHECK-LNX-NEXT:    xssubsp 1, 1, 5
+; CHECK-LNX-NEXT:    xsaddsp 0, 0, 4
+; CHECK-LNX-NEXT:    xsmulsp 0, 2, 0
+; CHECK-LNX-NEXT:    xxlxor 2, 2, 2
+; CHECK-LNX-NEXT:    fsel 0, 1, 0, 2
+; CHECK-LNX-NEXT:    xsrsqrtesp 1, 0
+; CHECK-LNX-NEXT:    xsmulsp 6, 0, 1
+; CHECK-LNX-NEXT:    xsabsdp 0, 0
+; CHECK-LNX-NEXT:    xsmulsp 1, 6, 1
+; CHECK-LNX-NEXT:    xsmulsp 3, 6, 3
+; CHECK-LNX-NEXT:    xssubsp 0, 0, 5
+; CHECK-LNX-NEXT:    xsaddsp 1, 1, 4
+; CHECK-LNX-NEXT:    xsmulsp 1, 3, 1
+; CHECK-LNX-NEXT:    fsel 1, 0, 1, 2
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: llvmintr_powf_f32_fast025:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @llvm.pow.f32(float %a, float 2.500000e-01)
+  ret float %call
+}
+
+; llvm.pow.f64 with exponent 0.25 and nnan ninf afn nsz flags: expanded inline on Linux, converted to __xl_pow_finite on AIX
+define double @llvmintr_pow_f64_fast025(double %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_pow_f64_fast025:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    xstsqrtdp 0, 1
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI1_0@toc@ha
+; CHECK-LNX-NEXT:    addis 4, 2, .LCPI1_1@toc@ha
+; CHECK-LNX-NEXT:    lfs 0, .LCPI1_0@toc@l(3)
+; CHECK-LNX-NEXT:    lfs 2, .LCPI1_1@toc@l(4)
+; CHECK-LNX-NEXT:    bc 12, 2, .LBB1_3
+; CHECK-LNX-NEXT:  # %bb.1: # %entry
+; CHECK-LNX-NEXT:    xsrsqrtedp 3, 1
+; CHECK-LNX-NEXT:    xsmuldp 4, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 4, 4, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 0
+; CHECK-LNX-NEXT:    xsadddp 4, 4, 2
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 4
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 0
+; CHECK-LNX-NEXT:    xsadddp 3, 3, 2
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 3
+; CHECK-LNX-NEXT:    xstsqrtdp 0, 1
+; CHECK-LNX-NEXT:    bc 4, 2, .LBB1_4
+; CHECK-LNX-NEXT:  .LBB1_2:
+; CHECK-LNX-NEXT:    xssqrtdp 1, 1
+; CHECK-LNX-NEXT:    blr
+; CHECK-LNX-NEXT:  .LBB1_3:
+; CHECK-LNX-NEXT:    xssqrtdp 1, 1
+; CHECK-LNX-NEXT:    xstsqrtdp 0, 1
+; CHECK-LNX-NEXT:    bc 12, 2, .LBB1_2
+; CHECK-LNX-NEXT:  .LBB1_4: # %entry
+; CHECK-LNX-NEXT:    xsrsqrtedp 3, 1
+; CHECK-LNX-NEXT:    xsmuldp 4, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 4, 4, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 0
+; CHECK-LNX-NEXT:    xsadddp 4, 4, 2
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 4
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 0, 1, 0
+; CHECK-LNX-NEXT:    xsadddp 2, 3, 2
+; CHECK-LNX-NEXT:    xsmuldp 1, 0, 2
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: llvmintr_pow_f64_fast025:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @llvm.pow.f64(double %a, double 2.500000e-01)
+  ret double %call
+}
+
+; llvm.pow.f32 with exponent 0.75 and nnan ninf afn nsz flags: expanded inline on Linux, converted to __xl_powf_finite on AIX
+define float @llvmintr_powf_f32_fast075(float %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_powf_f32_fast075:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    xsrsqrtesp 0, 1
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI2_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 3, .LCPI2_0@toc@l(3)
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI2_1@toc@ha
+; CHECK-LNX-NEXT:    lfs 4, .LCPI2_1@toc@l(3)
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI2_2@toc@ha
+; CHECK-LNX-NEXT:    lfs 5, .LCPI2_2@toc@l(3)
+; CHECK-LNX-NEXT:    xsmulsp 2, 1, 0
+; CHECK-LNX-NEXT:    xsabsdp 1, 1
+; CHECK-LNX-NEXT:    xsmulsp 0, 2, 0
+; CHECK-LNX-NEXT:    xsmulsp 2, 2, 3
+; CHECK-LNX-NEXT:    xssubsp 1, 1, 5
+; CHECK-LNX-NEXT:    xsaddsp 0, 0, 4
+; CHECK-LNX-NEXT:    xsmulsp 0, 2, 0
+; CHECK-LNX-NEXT:    xxlxor 2, 2, 2
+; CHECK-LNX-NEXT:    fsel 0, 1, 0, 2
+; CHECK-LNX-NEXT:    xsrsqrtesp 1, 0
+; CHECK-LNX-NEXT:    xsmulsp 6, 0, 1
+; CHECK-LNX-NEXT:    xsmulsp 1, 6, 1
+; CHECK-LNX-NEXT:    xsmulsp 3, 6, 3
+; CHECK-LNX-NEXT:    xsaddsp 1, 1, 4
+; CHECK-LNX-NEXT:    xsabsdp 4, 0
+; CHECK-LNX-NEXT:    xsmulsp 1, 3, 1
+; CHECK-LNX-NEXT:    xssubsp 3, 4, 5
+; CHECK-LNX-NEXT:    fsel 1, 3, 1, 2
+; CHECK-LNX-NEXT:    xsmulsp 1, 0, 1
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: llvmintr_powf_f32_fast075:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @llvm.pow.f32(float %a, float 7.500000e-01)
+  ret float %call
+}
+
+; llvm.pow.f64 with exponent 0.75 and nnan ninf afn nsz flags: expanded inline on Linux, converted to __xl_pow_finite on AIX
+define double @llvmintr_pow_f64_fast075(double %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_pow_f64_fast075:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    xstsqrtdp 0, 1
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI3_0@toc@ha
+; CHECK-LNX-NEXT:    addis 4, 2, .LCPI3_1@toc@ha
+; CHECK-LNX-NEXT:    lfs 0, .LCPI3_0@toc@l(3)
+; CHECK-LNX-NEXT:    lfs 2, .LCPI3_1@toc@l(4)
+; CHECK-LNX-NEXT:    bc 12, 2, .LBB3_3
+; CHECK-LNX-NEXT:  # %bb.1: # %entry
+; CHECK-LNX-NEXT:    xsrsqrtedp 3, 1
+; CHECK-LNX-NEXT:    xsmuldp 4, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 4, 4, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 0
+; CHECK-LNX-NEXT:    xsadddp 4, 4, 2
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 4
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 0
+; CHECK-LNX-NEXT:    xsadddp 3, 3, 2
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 3
+; CHECK-LNX-NEXT:    xstsqrtdp 0, 1
+; CHECK-LNX-NEXT:    bc 4, 2, .LBB3_4
+; CHECK-LNX-NEXT:  .LBB3_2:
+; CHECK-LNX-NEXT:    xssqrtdp 0, 1
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 0
+; CHECK-LNX-NEXT:    blr
+; CHECK-LNX-NEXT:  .LBB3_3:
+; CHECK-LNX-NEXT:    xssqrtdp 1, 1
+; CHECK-LNX-NEXT:    xstsqrtdp 0, 1
+; CHECK-LNX-NEXT:    bc 12, 2, .LBB3_2
+; CHECK-LNX-NEXT:  .LBB3_4: # %entry
+; CHECK-LNX-NEXT:    xsrsqrtedp 3, 1
+; CHECK-LNX-NEXT:    xsmuldp 4, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 4, 4, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 0
+; CHECK-LNX-NEXT:    xsadddp 4, 4, 2
+; CHECK-LNX-NEXT:    xsmuldp 3, 3, 4
+; CHECK-LNX-NEXT:    xsmuldp 4, 1, 3
+; CHECK-LNX-NEXT:    xsmuldp 3, 4, 3
+; CHECK-LNX-NEXT:    xsmuldp 0, 4, 0
+; CHECK-LNX-NEXT:    xsadddp 2, 3, 2
+; CHECK-LNX-NEXT:    xsmuldp 0, 0, 2
+; CHECK-LNX-NEXT:    xsmuldp 1, 1, 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: llvmintr_pow_f64_fast075:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @llvm.pow.f64(double %a, double 7.500000e-01)
+  ret double %call
+}
+
+; llvm.pow.f32 with exponent 0.50 and nnan ninf afn nsz flags: both targets call the MASS entry __xl_powf_finite
+define float @llvmintr_powf_f32_fast050(float %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_powf_f32_fast050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    bl __xl_powf_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: llvmintr_powf_f32_fast050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @llvm.pow.f32(float %a, float 5.000000e-01)
+  ret float %call
+}
+
+; llvm.pow.f64 with exponent 0.50 and nnan ninf afn nsz flags: both targets call the MASS entry __xl_pow_finite
+define double @llvmintr_pow_f64_fast050(double %a) #1 {
+; CHECK-LNX-LABEL: llvmintr_pow_f64_fast050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    bl __xl_pow_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: llvmintr_pow_f64_fast050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @llvm.pow.f64(double %a, double 5.000000e-01)
+  ret double %call
+}
+attributes #1 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "approx-func-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll b/llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll
@@ -0,0 +1,456 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK-LNX %s
+; RUN: llc -verify-machineinstrs -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=CHECK-AIX %s
+
+declare float @powf (float, float);
+declare double @pow (double, double);
+declare float @__powf_finite (float, float);
+declare double @__pow_finite (double, double);
+
+; powf with exponent 0.25 and nnan ninf afn nsz flags: converted to the MASS entry __xl_powf_finite on both targets
+define float @powf_f32_fast025(float %a) #1 {
+;
+; CHECK-LNX-LABEL: powf_f32_fast025:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI0_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_powf_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: powf_f32_fast025:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C0(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @powf(float %a, float 2.500000e-01)
+  ret float %call
+}
+
+; pow with exponent 0.25 and nnan ninf afn nsz flags: converted to the MASS entry __xl_pow_finite on both targets
+define double @pow_f64_fast025(double %a) #1 {
+;
+; CHECK-LNX-LABEL: pow_f64_fast025:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI1_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI1_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_pow_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: pow_f64_fast025:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C1(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @pow(double %a, double 2.500000e-01)
+  ret double %call
+}
+
+; powf with exponent 0.75 and nnan ninf afn nsz flags: converted to the MASS entry __xl_powf_finite on both targets
+define float @powf_f32_fast075(float %a) #1 {
+;
+; CHECK-LNX-LABEL: powf_f32_fast075:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI2_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI2_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_powf_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: powf_f32_fast075:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C2(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @powf(float %a, float 7.500000e-01)
+  ret float %call
+}
+
+; pow with exponent 0.75 and nnan ninf afn nsz flags: converted to the MASS entry __xl_pow_finite on both targets
+define double @pow_f64_fast075(double %a) #1 {
+;
+; CHECK-LNX-LABEL: pow_f64_fast075:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI3_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI3_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_pow_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: pow_f64_fast075:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C3(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @pow(double %a, double 7.500000e-01)
+  ret double %call
+}
+
+; powf with exponent 0.50 and nnan ninf afn nsz flags: converted to the MASS entry __xl_powf_finite on both targets
+define float @powf_f32_fast050(float %a) #1 {
+;
+; CHECK-LNX-LABEL: powf_f32_fast050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI4_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI4_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_powf_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: powf_f32_fast050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C4(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @powf(float %a, float 5.000000e-01)
+  ret float %call
+}
+
+; pow with exponent 0.50 and nnan ninf afn nsz flags: converted to the MASS entry __xl_pow_finite on both targets
+define double @pow_f64_fast050(double %a) #1 {
+;
+; CHECK-LNX-LABEL: pow_f64_fast050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI5_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI5_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_pow_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: pow_f64_fast050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C5(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @pow(double %a, double 5.000000e-01)
+  ret double %call
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; __powf_finite with exponent 0.25 and nnan ninf afn nsz flags: converted to the MASS entry __xl_powf_finite on both targets
+define float @__powf_finite_f32_fast025(float %a) #1 {
+;
+; CHECK-LNX-LABEL: __powf_finite_f32_fast025:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI6_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI6_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_powf_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: __powf_finite_f32_fast025:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C6(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__powf_finite(float %a, float 2.500000e-01)
+  ret float %call
+}
+
+; __pow_finite with exponent 0.25 and nnan ninf afn nsz flags: converted to the MASS entry __xl_pow_finite on both targets
+define double @__pow_finite_f64_fast025(double %a) #1 {
+;
+; CHECK-LNX-LABEL: __pow_finite_f64_fast025:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI7_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI7_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_pow_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: __pow_finite_f64_fast025:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C7(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__pow_finite(double %a, double 2.500000e-01)
+  ret double %call
+}
+
+; __powf_finite with exponent 0.75 and nnan ninf afn nsz flags: converted to the MASS entry __xl_powf_finite on both targets
+define float @__powf_finite_f32_fast075(float %a) #1 {
+;
+; CHECK-LNX-LABEL: __powf_finite_f32_fast075:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI8_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI8_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_powf_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: __powf_finite_f32_fast075:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C8(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__powf_finite(float %a, float 7.500000e-01)
+  ret float %call
+}
+
+; __pow_finite with exponent 0.75 and nnan ninf afn nsz flags: converted to the MASS entry __xl_pow_finite on both targets
+define double @__pow_finite_f64_fast075(double %a) #1 {
+;
+; CHECK-LNX-LABEL: __pow_finite_f64_fast075:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI9_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI9_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_pow_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: __pow_finite_f64_fast075:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C9(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__pow_finite(double %a, double 7.500000e-01)
+  ret double %call
+}
+
+; __powf_finite with exponent 0.50 and nnan ninf afn nsz flags: converted to the MASS entry __xl_powf_finite on both targets
+define float @__powf_finite_f32_fast050(float %a) #1 {
+;
+; CHECK-LNX-LABEL: __powf_finite_f32_fast050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI10_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI10_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_powf_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: __powf_finite_f32_fast050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C10(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_powf_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz float @__powf_finite(float %a, float 5.000000e-01)
+  ret float %call
+}
+
+; __pow_finite with exponent 0.50 and nnan ninf afn nsz flags: converted to the MASS entry __xl_pow_finite on both targets
+define double @__pow_finite_f64_fast050(double %a) #1 {
+;
+; CHECK-LNX-LABEL: __pow_finite_f64_fast050:
+; CHECK-LNX:       # %bb.0: # %entry
+; CHECK-LNX-NEXT:    mflr 0
+; CHECK-LNX-NEXT:    std 0, 16(1)
+; CHECK-LNX-NEXT:    stdu 1, -32(1)
+; CHECK-LNX-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-LNX-NEXT:    .cfi_offset lr, 16
+; CHECK-LNX-NEXT:    addis 3, 2, .LCPI11_0@toc@ha
+; CHECK-LNX-NEXT:    lfs 2, .LCPI11_0@toc@l(3)
+; CHECK-LNX-NEXT:    bl __xl_pow_finite
+; CHECK-LNX-NEXT:    nop
+; CHECK-LNX-NEXT:    addi 1, 1, 32
+; CHECK-LNX-NEXT:    ld 0, 16(1)
+; CHECK-LNX-NEXT:    mtlr 0
+; CHECK-LNX-NEXT:    blr
+;
+; CHECK-AIX-LABEL: __pow_finite_f64_fast050:
+; CHECK-AIX:       # %bb.0: # %entry
+; CHECK-AIX-NEXT:    mflr 0
+; CHECK-AIX-NEXT:    stw 0, 8(1)
+; CHECK-AIX-NEXT:    stwu 1, -64(1)
+; CHECK-AIX-NEXT:    lwz 3, L..C11(2) # %const.0
+; CHECK-AIX-NEXT:    lfs 2, 0(3)
+; CHECK-AIX-NEXT:    bl .__xl_pow_finite[PR]
+; CHECK-AIX-NEXT:    nop
+; CHECK-AIX-NEXT:    addi 1, 1, 64
+; CHECK-AIX-NEXT:    lwz 0, 8(1)
+; CHECK-AIX-NEXT:    mtlr 0
+; CHECK-AIX-NEXT:    blr
+entry:
+  %call = tail call nnan ninf afn nsz double @__pow_finite(double %a, double 5.000000e-01)
+  ret double %call
+}
+
+attributes #1 = { "approx-func-fp-math"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }