Index: llvm/include/llvm/IR/Operator.h
===================================================================
--- llvm/include/llvm/IR/Operator.h
+++ llvm/include/llvm/IR/Operator.h
@@ -247,6 +247,9 @@
   void operator|=(const FastMathFlags &OtherFlags) {
     Flags |= OtherFlags.Flags;
   }
+
+  /// Print fast-math flags to \p O.
+  void print(raw_ostream &O) const;
 };
 
 /// Utility class for floating point operations which can have
Index: llvm/lib/IR/AsmWriter.cpp
===================================================================
--- llvm/lib/IR/AsmWriter.cpp
+++ llvm/lib/IR/AsmWriter.cpp
@@ -1314,25 +1314,8 @@
 
 static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(U)) {
-    // 'Fast' is an abbreviation for all fast-math-flags.
-    if (FPO->isFast())
-      Out << " fast";
-    else {
-      if (FPO->hasAllowReassoc())
-        Out << " reassoc";
-      if (FPO->hasNoNaNs())
-        Out << " nnan";
-      if (FPO->hasNoInfs())
-        Out << " ninf";
-      if (FPO->hasNoSignedZeros())
-        Out << " nsz";
-      if (FPO->hasAllowReciprocal())
-        Out << " arcp";
-      if (FPO->hasAllowContract())
-        Out << " contract";
-      if (FPO->hasApproxFunc())
-        Out << " afn";
-    }
+    FastMathFlags FMF = FPO->getFastMathFlags();
+    FMF.print(Out);
   }
 
   if (const OverflowingBinaryOperator *OBO =
Index: llvm/lib/IR/Operator.cpp
===================================================================
--- llvm/lib/IR/Operator.cpp
+++ llvm/lib/IR/Operator.cpp
@@ -226,4 +226,25 @@
   }
   return true;
 }
+
+void FastMathFlags::print(raw_ostream &O) const {
+  if (all())
+    O << " fast";
+  else {
+    if (allowReassoc())
+      O << " reassoc";
+    if (noNaNs())
+      O << " nnan";
+    if (noInfs())
+      O << " ninf";
+    if (noSignedZeros())
+      O << " nsz";
+    if (allowReciprocal())
+      O << " arcp";
+    if (allowContract())
+      O << " contract";
+    if (approxFunc())
+      O << " afn";
+  }
+}
 } // namespace llvm
Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9636,6 +9636,7 @@
                               VecOp, Plan->getVPValue(R->getOperand(1))};
       VPInstruction *FMulRecipe =
           new VPInstruction(Instruction::FMul, FMulOps);
+      FMulRecipe->setFastMathFlags(R->getFastMathFlags());
       WidenRecipe->getParent()->insert(FMulRecipe,
                                        WidenRecipe->getIterator());
       VecOp = FMulRecipe;
Index: llvm/lib/Transforms/Vectorize/VPlan.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlan.h
+++ llvm/lib/Transforms/Vectorize/VPlan.h
@@ -789,6 +789,7 @@
 private:
   typedef unsigned char OpcodeTy;
   OpcodeTy Opcode;
+  FastMathFlags FMF;
 
   /// Utility method serving execute(): generates a single instance of the
   /// modeled instruction.
@@ -870,6 +871,9 @@
       return true;
     }
   }
+
+  /// Set the fast-math flags.
+  void setFastMathFlags(FastMathFlags FMFNew);
 };
 
 /// VPWidenRecipe is a recipe for producing a copy of vector type its
Index: llvm/lib/Transforms/Vectorize/VPlan.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -718,6 +718,8 @@
 
 void VPInstruction::execute(VPTransformState &State) {
   assert(!State.Instance && "VPInstruction executing an Instance");
+  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
+  State.Builder.setFastMathFlags(FMF);
   for (unsigned Part = 0; Part < State.UF; ++Part)
     generateInstruction(State, Part);
 }
@@ -760,6 +762,8 @@
     O << Instruction::getOpcodeName(getOpcode());
   }
 
+  FMF.print(O);
+
   for (const VPValue *Operand : operands()) {
     O << " ";
     Operand->printAsOperand(O, SlotTracker);
@@ -767,6 +771,16 @@
   }
 }
 #endif
 
+void VPInstruction::setFastMathFlags(FastMathFlags FMFNew) {
+  // Make sure the VPInstruction is a floating-point operation.
+  assert((Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
+          Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
+          Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
+          Opcode == Instruction::FCmp) &&
+         "this op can't take fast-math flags");
+  FMF = FMFNew;
+}
+
 /// Generate the code inside the body of the vectorized loop. Assumes a single
 /// LoopVectorBody basic-block was created for this. Introduce additional
 /// basic-blocks as needed, and fill them all.
Index: llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -483,10 +483,10 @@
 ; CHECK-ORDERED: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
 ; CHECK-ORDERED: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
 ; CHECK-ORDERED: [[WIDE_LOAD7:%.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
-; CHECK-ORDERED: [[FMUL:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
-; CHECK-ORDERED: [[FMUL1:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
-; CHECK-ORDERED: [[FMUL2:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
-; CHECK-ORDERED: [[FMUL3:%.*]] = fmul <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
+; CHECK-ORDERED: [[FMUL:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]]
+; CHECK-ORDERED: [[FMUL1:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]]
+; CHECK-ORDERED: [[FMUL2:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
+; CHECK-ORDERED: [[FMUL3:%.*]] = fmul nnan <vscale x 8 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
 ; CHECK-ORDERED: [[RDX:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[VEC_PHI]], <vscale x 8 x float> [[FMUL]])
 ; CHECK-ORDERED: [[RDX1:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[RDX]], <vscale x 8 x float> [[FMUL1]])
 ; CHECK-ORDERED: [[RDX2:%.*]] = call nnan float @llvm.vector.reduce.fadd.nxv8f32(float [[RDX1]], <vscale x 8 x float> [[FMUL2]])
Index: llvm/test/Transforms/LoopVectorize/vplan-printing.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/vplan-printing.ll
+++ llvm/test/Transforms/LoopVectorize/vplan-printing.ll
@@ -227,4 +227,39 @@
   ret void
 }
 
+define float @print_fmuladd_strict(float* %a, float* %b, i64 %n) {
+; CHECK-LABEL: Checking a loop in "print_fmuladd_strict"
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: for.body:
+; CHECK-NEXT:   WIDEN-INDUCTION %iv = phi 0, %iv.next
+; CHECK-NEXT:   WIDEN-REDUCTION-PHI ir<%sum.07> = phi ir<0.000000e+00>, ir<%muladd>
+; CHECK-NEXT:   CLONE ir<%arrayidx> = getelementptr ir<%a>, ir<%iv>
+; CHECK-NEXT:   WIDEN ir<%0> = load ir<%arrayidx>
+; CHECK-NEXT:   CLONE ir<%arrayidx2> = getelementptr ir<%b>, ir<%iv>
+; CHECK-NEXT:   WIDEN ir<%1> = load ir<%arrayidx2>
+; CHECK-NEXT:   EMIT vp<%6> = fmul nnan ninf nsz ir<%0> ir<%1>
+; CHECK-NEXT:   REDUCE ir<%muladd> = ir<%sum.07> + reduce.fadd (vp<%6>)
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
+  %0 = load float, float* %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds float, float* %b, i64 %iv
+  %1 = load float, float* %arrayidx2, align 4
+  %muladd = tail call nnan ninf nsz float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+  ret float %muladd
+}
+
 declare float @llvm.sqrt.f32(float) nounwind readnone
+declare float @llvm.fmuladd.f32(float, float, float)
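
Note for reviewers (not part of the diff): the change has two halves. FastMathFlags::print() becomes the single place that knows how to spell fast-math flags, shared by the IR AsmWriter and the VPlan printer, and VPInstruction::execute() installs its stored FMF on the IRBuilder via a FastMathFlagGuard so every instruction the builder emits carries the flags. Below is a minimal standalone sketch of that builder-guard pattern in isolation, not the patch's code path: the module and function names are made up for illustration, and the FMF.print() call assumes a build that already contains this patch.

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  #include "llvm/IR/Operator.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    Module M("fmf_demo", Ctx);
    Type *FloatTy = Type::getFloatTy(Ctx);
    FunctionType *FnTy =
        FunctionType::get(FloatTy, {FloatTy, FloatTy}, /*isVarArg=*/false);
    Function *F = Function::Create(FnTy, Function::ExternalLinkage, "mul", M);
    IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

    // Same pattern as VPInstruction::execute(): scope the flags with a
    // guard, then install them on the builder so CreateFMul picks them up.
    IRBuilderBase::FastMathFlagGuard FMFGuard(B);
    FastMathFlags FMF;
    FMF.setNoNaNs();
    FMF.setNoInfs();
    B.setFastMathFlags(FMF);

    Value *Mul = B.CreateFMul(F->getArg(0), F->getArg(1), "mul");
    B.CreateRet(Mul);

    FMF.print(errs()); // Hook added by this patch; prints " nnan ninf".
    errs() << "\n";
    M.print(errs(), nullptr); // Body contains: %mul = fmul nnan ninf float %0, %1
    return 0;
  }

The guard matters because the builder is shared across recipes during VPlan execution: without it, flags set for one VPInstruction would leak onto instructions generated for unrelated recipes.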