diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -1469,6 +1469,9 @@
   ///
   static auto predicates() { return FCmpPredicates(); }
 
+  /// Evaluate the given predicate for the constant values.
+  static bool evaluate(Predicate Pred, const APFloat &Op1, const APFloat &Op2);
+
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const Instruction *I) {
     return I->getOpcode() == Instruction::FCmp;
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1508,6 +1508,8 @@
   case Intrinsic::experimental_constrained_trunc:
   case Intrinsic::experimental_constrained_nearbyint:
   case Intrinsic::experimental_constrained_rint:
+  case Intrinsic::experimental_constrained_fcmp:
+  case Intrinsic::experimental_constrained_fcmps:
     return true;
   default:
     return false;
@@ -1767,31 +1769,24 @@
 ///
 /// \param CI Constrained intrinsic call.
 /// \param St Exception flags raised during constant evaluation.
-static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
+static bool mayFoldConstrained(const ConstrainedFPIntrinsic *CI,
                                APFloat::opStatus St) {
   Optional<RoundingMode> ORM = CI->getRoundingMode();
   Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
 
   // If the operation does not change exception status flags, it is safe
   // to fold.
-  if (St == APFloat::opStatus::opOK) {
-    // When FP exceptions are not ignored, intrinsic call will not be
-    // eliminated, because it is considered as having side effect. But we
-    // know that its evaluation does not raise exceptions, so side effect
-    // is absent. To allow removing the call, mark it as not accessing memory.
-    if (EB && *EB != fp::ExceptionBehavior::ebIgnore)
-      CI->addFnAttr(Attribute::ReadNone);
+  if (St == APFloat::opStatus::opOK)
     return true;
-  }
 
   // If evaluation raised FP exception, the result can depend on rounding
   // mode. If the latter is unknown, folding is not possible.
-  if (!ORM || *ORM == RoundingMode::Dynamic)
+  if (ORM && *ORM == RoundingMode::Dynamic)
     return false;
 
   // If FP exceptions are ignored, fold the call, even if such exception is
   // raised.
-  if (!EB || *EB != fp::ExceptionBehavior::ebStrict)
+  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
     return true;
 
   // Leave the calculation for runtime so that exception flags be correctly set
@@ -2289,6 +2284,26 @@
   return nullptr;
 }
 
+static Constant *evaluateCompare(Intrinsic::ID IntrinsicID,
+                                 const CallBase *Call, const APFloat &Op1,
+                                 const APFloat &Op2) {
+  APFloat::opStatus St = APFloat::opOK;
+  auto FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
+  FCmpInst::Predicate Cond = FCmp->getPredicate();
+  if (IntrinsicID == Intrinsic::experimental_constrained_fcmp) {
+    if (Op1.isSignaling() || Op2.isSignaling())
+      St = APFloat::opInvalidOp;
+  } else {
+    assert(IntrinsicID == Intrinsic::experimental_constrained_fcmps);
+    if (Op1.isNaN() || Op2.isNaN())
+      St = APFloat::opInvalidOp;
+  }
+  bool Result = FCmpInst::evaluate(Cond, Op1, Op2);
+  if (mayFoldConstrained(FCmp, St))
+    return ConstantInt::get(Call->getType(), Result);
+  return nullptr;
+}
+
 static Constant *ConstantFoldScalarCall2(StringRef Name,
                                          Intrinsic::ID IntrinsicID,
                                          Type *Ty,
@@ -2317,8 +2332,6 @@
   }
 
   if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
-    if (!Ty->isFloatingPointTy())
-      return nullptr;
     const APFloat &Op1V = Op1->getValueAPF();
 
     if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
@@ -2348,9 +2361,11 @@
         case Intrinsic::experimental_constrained_frem:
           St = Res.mod(Op2V);
           break;
+        case Intrinsic::experimental_constrained_fcmp:
+        case Intrinsic::experimental_constrained_fcmps:
+          return evaluateCompare(IntrinsicID, Call, Op1V, Op2V);
         }
-        if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
-                               St))
+        if (mayFoldConstrained(ConstrIntr, St))
           return ConstantFP::get(Ty->getContext(), Res);
         return nullptr;
       }
@@ -2778,8 +2793,7 @@
         St = Res.fusedMultiplyAdd(C2, C3, RM);
         break;
       }
-      if (mayFoldConstrained(
-              const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
+      if (mayFoldConstrained(ConstrIntr, St))
         return ConstantFP::get(Ty->getContext(), Res);
       return nullptr;
     }
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -1801,46 +1801,8 @@
   } else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) {
     const APFloat &C1V = cast<ConstantFP>(C1)->getValueAPF();
     const APFloat &C2V = cast<ConstantFP>(C2)->getValueAPF();
-    APFloat::cmpResult R = C1V.compare(C2V);
-    switch (pred) {
-    default: llvm_unreachable("Invalid FCmp Predicate");
-    case FCmpInst::FCMP_FALSE: return Constant::getNullValue(ResultTy);
-    case FCmpInst::FCMP_TRUE:  return Constant::getAllOnesValue(ResultTy);
-    case FCmpInst::FCMP_UNO:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered);
-    case FCmpInst::FCMP_ORD:
-      return ConstantInt::get(ResultTy, R!=APFloat::cmpUnordered);
-    case FCmpInst::FCMP_UEQ:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
-                                        R==APFloat::cmpEqual);
-    case FCmpInst::FCMP_OEQ:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpEqual);
-    case FCmpInst::FCMP_UNE:
-      return ConstantInt::get(ResultTy, R!=APFloat::cmpEqual);
-    case FCmpInst::FCMP_ONE:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
-                                        R==APFloat::cmpGreaterThan);
-    case FCmpInst::FCMP_ULT:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
-                                        R==APFloat::cmpLessThan);
-    case FCmpInst::FCMP_OLT:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan);
-    case FCmpInst::FCMP_UGT:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
-                                        R==APFloat::cmpGreaterThan);
-    case FCmpInst::FCMP_OGT:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan);
-    case FCmpInst::FCMP_ULE:
-      return ConstantInt::get(ResultTy, R!=APFloat::cmpGreaterThan);
-    case FCmpInst::FCMP_OLE:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
-                                        R==APFloat::cmpEqual);
-    case FCmpInst::FCMP_UGE:
-      return ConstantInt::get(ResultTy, R!=APFloat::cmpLessThan);
-    case FCmpInst::FCMP_OGE:
-      return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan ||
-                                        R==APFloat::cmpEqual);
-    }
+    CmpInst::Predicate Predicate = CmpInst::Predicate(pred);
+    return ConstantInt::get(ResultTy, FCmpInst::evaluate(Predicate, C1V, C2V));
   } else if (auto *C1VTy = dyn_cast<VectorType>(C1->getType())) {
     // Fast path for splatted constants.
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -4240,6 +4240,47 @@
   return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
 }
 
+bool FCmpInst::evaluate(Predicate Pred, const APFloat &Op1,
+                        const APFloat &Op2) {
+  APFloat::cmpResult R = Op1.compare(Op2);
+  switch (Pred) {
+  default:
+    llvm_unreachable("Invalid FCmp Predicate");
+  case FCmpInst::FCMP_FALSE:
+    return false;
+  case FCmpInst::FCMP_TRUE:
+    return true;
+  case FCmpInst::FCMP_UNO:
+    return R == APFloat::cmpUnordered;
+  case FCmpInst::FCMP_ORD:
+    return R != APFloat::cmpUnordered;
+  case FCmpInst::FCMP_UEQ:
+    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
+  case FCmpInst::FCMP_OEQ:
+    return R == APFloat::cmpEqual;
+  case FCmpInst::FCMP_UNE:
+    return R != APFloat::cmpEqual;
+  case FCmpInst::FCMP_ONE:
+    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
+  case FCmpInst::FCMP_ULT:
+    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
+  case FCmpInst::FCMP_OLT:
+    return R == APFloat::cmpLessThan;
+  case FCmpInst::FCMP_UGT:
+    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
+  case FCmpInst::FCMP_OGT:
+    return R == APFloat::cmpGreaterThan;
+  case FCmpInst::FCMP_ULE:
+    return R != APFloat::cmpGreaterThan;
+  case FCmpInst::FCMP_OLE:
+    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
+  case FCmpInst::FCMP_UGE:
+    return R != APFloat::cmpLessThan;
+  case FCmpInst::FCMP_OGE:
+    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 //                        SwitchInst Implementation
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Transforms/Scalar/InstSimplifyPass.cpp b/llvm/lib/Transforms/Scalar/InstSimplifyPass.cpp
--- a/llvm/lib/Transforms/Scalar/InstSimplifyPass.cpp
+++ b/llvm/lib/Transforms/Scalar/InstSimplifyPass.cpp
@@ -30,6 +30,23 @@
 
 STATISTIC(NumSimplified, "Number of redundant instructions removed");
 
+static bool isReplacedInstructionDead(Instruction *I) {
+  if (!I->use_empty())
+    return false;
+  if (wouldInstructionBeTriviallyDead(I))
+    return true;
+  // Constrained intrinsics are usually marked as having side effects to
+  // indicate a possible effect on the floating-point environment. However, if
+  // such an instruction is replaced, the environment is assumed to be
+  // unchanged, the change ignored, or the replacement to have the same effect.
+  // In that case drop the side effect and consider the instruction dead.
+  if (auto *CI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
+    CI->addFnAttr(Attribute::ReadNone);
+    return true;
+  }
+  return false;
+}
+
 static bool runImpl(Function &F, const SimplifyQuery &SQ,
                     OptimizationRemarkEmitter *ORE) {
   SmallPtrSet<const Instruction *, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
@@ -51,7 +68,7 @@
       continue;
 
     // Don't waste time simplifying dead/unused instructions.
-    if (isInstructionTriviallyDead(&I)) {
+    if (isReplacedInstructionDead(&I)) {
       DeadInstsInBB.push_back(&I);
       Changed = true;
     } else if (!I.use_empty()) {
@@ -62,8 +79,8 @@
         I.replaceAllUsesWith(V);
         ++NumSimplified;
         Changed = true;
-        // A call can get simplified, but it may not be trivially dead.
-        if (isInstructionTriviallyDead(&I))
+        // A call can get simplified, but it may not be dead.
+        if (isReplacedInstructionDead(&I))
           DeadInstsInBB.push_back(&I);
       }
     }
diff --git a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
--- a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
+++ b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
@@ -17,7 +17,6 @@
 define double @floor_02() #0 {
 ; CHECK-LABEL: @floor_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.floor.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    ret double -1.100000e+01
 ;
 entry:
@@ -40,7 +39,6 @@
 define double @ceil_02() #0 {
 ; CHECK-LABEL: @ceil_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.ceil.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double -1.000000e+01
 ;
 entry:
@@ -63,7 +61,6 @@
 define double @trunc_02() #0 {
 ; CHECK-LABEL: @trunc_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double -1.000000e+01
 ;
 entry:
@@ -86,7 +83,6 @@
 define double @round_02() #0 {
 ; CHECK-LABEL: @round_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.round.f64(double -1.050000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double -1.100000e+01
 ;
 entry:
@@ -120,7 +116,6 @@
 define double @nearbyint_03() #0 {
 ; CHECK-LABEL: @nearbyint_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.towardzero", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -132,7 +127,6 @@
 define double @nearbyint_04() #0 {
 ; CHECK-LABEL: @nearbyint_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -144,7 +138,7 @@
 define double @nearbyint_05() #0 {
 ; CHECK-LABEL: @nearbyint_05(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -179,7 +173,6 @@
 define double @nonfinite_03() #0 {
 ; CHECK-LABEL: @nonfinite_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF8000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double 0x7FF8000000000000
 ;
 entry:
@@ -191,7 +184,6 @@
 define double @nonfinite_04() #0 {
 ; CHECK-LABEL: @nonfinite_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF0000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double 0x7FF0000000000000
 ;
 entry:
@@ -203,7 +195,6 @@
 define double @rint_01() #0 {
 ; CHECK-LABEL: @rint_01(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.000000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -415,6 +406,113 @@
 }
 
+; When exceptions are ignored, comparison of constants can be folded, even for (signaling) NaNs.
+define i1 @cmp_eq_01() #0 {
+; CHECK-LABEL: @cmp_eq_01(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmp.f64(double 1.0, double 2.0, metadata !"oeq", metadata !"fpexcept.ignore") #0
+  ret i1 %result
+}
+
+define i1 @cmp_eq_02() #0 {
+; CHECK-LABEL: @cmp_eq_02(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 true
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmp.f64(double 2.0, double 2.0, metadata !"oeq", metadata !"fpexcept.ignore") #0
+  ret i1 %result
+}
+
+define i1 @cmp_eq_03() #0 {
+; CHECK-LABEL: @cmp_eq_03(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmp.f64(double 2.0, double 0x7ff8000000000000, metadata !"oeq", metadata !"fpexcept.ignore") #0
+  ret i1 %result
+}
+
+define i1 @cmp_eq_04() #0 {
+; CHECK-LABEL: @cmp_eq_04(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmp.f64(double 2.0, double 0x7ff4000000000000, metadata !"oeq", metadata !"fpexcept.ignore") #0
+  ret i1 %result
+}
+
+define i1 @cmp_eq_05() #0 {
+; CHECK-LABEL: @cmp_eq_05(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmps.f64(double 2.0, double 0x7ff8000000000000, metadata !"oeq", metadata !"fpexcept.ignore") #0
+  ret i1 %result
+}
+
+define i1 @cmp_eq_06() #0 {
+; CHECK-LABEL: @cmp_eq_06(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmps.f64(double 2.0, double 0x7ff4000000000000, metadata !"oeq", metadata !"fpexcept.ignore") #0
+  ret i1 %result
+}
+
+; Compare with SNAN is NOT folded if the exception behavior mode is not 'ignore'.
+define i1 @cmp_eq_nan_01() #0 {
+; CHECK-LABEL: @cmp_eq_nan_01(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    ret i1 [[RESULT]]
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7ff4000000000000, double 1.0, metadata !"oeq", metadata !"fpexcept.strict") #0
+  ret i1 %result
+}
+
+define i1 @cmp_eq_nan_02() #0 {
+; CHECK-LABEL: @cmp_eq_nan_02(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    ret i1 [[RESULT]]
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7ff4000000000000, double 1.0, metadata !"oeq", metadata !"fpexcept.strict") #0
+  ret i1 %result
+}
+
+; Compare with QNAN is folded for fcmp but is NOT folded for fcmps if the exception behavior mode is not 'ignore'.
+define i1 @cmp_eq_nan_03() #0 {
+; CHECK-LABEL: @cmp_eq_nan_03(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7ff8000000000000, double 1.0, metadata !"oeq", metadata !"fpexcept.strict") #0
+  ret i1 %result
+}
+
+define i1 @cmp_eq_nan_04() #0 {
+; CHECK-LABEL: @cmp_eq_nan_04(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    ret i1 [[RESULT]]
+;
+entry:
+  %result = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7ff8000000000000, double 1.0, metadata !"oeq", metadata !"fpexcept.strict") #0
+  ret i1 %result
+}
+
+
 attributes #0 = { strictfp }
 
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
@@ -433,4 +531,6 @@
 declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
 declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
 declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Transforms/InstSimplify/fp-undef-poison-strictfp.ll b/llvm/test/Transforms/InstSimplify/fp-undef-poison-strictfp.ll
--- a/llvm/test/Transforms/InstSimplify/fp-undef-poison-strictfp.ll
+++ b/llvm/test/Transforms/InstSimplify/fp-undef-poison-strictfp.ll
@@ -44,7 +44,6 @@
 
 define float @fadd_poison_op0_strict(float %x) #0 {
 ; CHECK-LABEL: @fadd_poison_op0_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float poison, float [[X:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fadd.f32(float poison, float %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -112,7 +111,6 @@
 
 define float @fadd_poison_op1_strict(float %x) #0 {
 ; CHECK-LABEL: @fadd_poison_op1_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fadd.f32(float %x, float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -184,7 +182,6 @@
 
 define float @fsub_poison_op0_strict(float %x) {
 ; CHECK-LABEL: @fsub_poison_op0_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float poison, float [[X:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fsub.f32(float poison, float %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -252,7 +249,6 @@
 
 define float @fsub_poison_op1_strict(float %x) {
 ; CHECK-LABEL: @fsub_poison_op1_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[X:%.*]], float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fsub.f32(float %x, float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -324,7 +320,6 @@
 
 define float @fmul_poison_op0_strict(float %x) {
 ; CHECK-LABEL: @fmul_poison_op0_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float poison, float [[X:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fmul.f32(float poison, float %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -392,7 +387,6 @@
 
 define float @fmul_poison_op1_strict(float %x) {
 ; CHECK-LABEL: @fmul_poison_op1_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[X:%.*]], float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fmul.f32(float %x, float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -464,7 +458,6 @@
 
 define float @fdiv_poison_op0_strict(float %x) {
 ; CHECK-LABEL: @fdiv_poison_op0_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fdiv.f32(float poison, float [[X:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fdiv.f32(float poison, float %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -532,7 +525,6 @@
 
 define float @fdiv_poison_op1_strict(float %x) {
 ; CHECK-LABEL: @fdiv_poison_op1_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fdiv.f32(float [[X:%.*]], float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fdiv.f32(float %x, float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -604,7 +596,6 @@
 
 define float @frem_poison_op0_strict(float %x) {
 ; CHECK-LABEL: @frem_poison_op0_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.frem.f32(float poison, float [[X:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.frem.f32(float poison, float %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -672,7 +663,6 @@
 
 define float @frem_poison_op1_strict(float %x) {
 ; CHECK-LABEL: @frem_poison_op1_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.frem.f32(float [[X:%.*]], float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.frem.f32(float %x, float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -744,7 +734,6 @@
 
 define float @fma_poison_op0_strict(float %x, float %y) {
 ; CHECK-LABEL: @fma_poison_op0_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fma.f32(float poison, float [[X:%.*]], float [[Y:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fma.f32(float poison, float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -812,7 +801,6 @@
 
 define float @fma_poison_op1_strict(float %x, float %y) {
 ; CHECK-LABEL: @fma_poison_op1_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fma.f32(float [[X:%.*]], float poison, float [[Y:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fma.f32(float %x, float poison, float %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
@@ -880,7 +868,6 @@
 
 define float @fma_poison_op2_strict(float %x, float %y) {
 ; CHECK-LABEL: @fma_poison_op2_strict(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.experimental.constrained.fma.f32(float [[X:%.*]], float [[Y:%.*]], float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    ret float poison
 ;
   %r = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float poison, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
diff --git a/llvm/test/Transforms/InstSimplify/strictfp-fadd.ll b/llvm/test/Transforms/InstSimplify/strictfp-fadd.ll
--- a/llvm/test/Transforms/InstSimplify/strictfp-fadd.ll
+++ b/llvm/test/Transforms/InstSimplify/strictfp-fadd.ll
@@ -135,8 +135,7 @@
 
 define float @fadd_nnan_x_n0_ebstrict(float %a) #0 {
 ; CHECK-LABEL: @fadd_nnan_x_n0_ebstrict(
-; CHECK-NEXT:    [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fadd.f32(float [[A:%.*]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    ret float [[A]]
+; CHECK-NEXT:    ret float [[A:%.*]]
 ;
   %ret = call nnan float @llvm.experimental.constrained.fadd.f32(float %a, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %ret
@@ -144,8 +143,7 @@
 
 define <2 x float> @fadd_vec_nnan_x_n0_ebstrict(<2 x float> %a) #0 {
 ; CHECK-LABEL: @fadd_vec_nnan_x_n0_ebstrict(
-; CHECK-NEXT:    [[RET:%.*]] = call nnan <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> [[A:%.*]], <2 x float> <float -0.000000e+00, float -0.000000e+00>, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    ret <2 x float> [[A]]
+; CHECK-NEXT:    ret <2 x float> [[A:%.*]]
 ;
   %ret = call nnan <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %a, <2 x float> <float -0.0, float -0.0>, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x float> %ret
@@ -297,8 +295,7 @@
 
 define float @fold_fadd_nsz_nnan_x_0_ebstrict(float %a) #0 {
 ; CHECK-LABEL: @fold_fadd_nsz_nnan_x_0_ebstrict(
-; CHECK-NEXT:    [[ADD:%.*]] = call nnan nsz float @llvm.experimental.constrained.fadd.f32(float [[A:%.*]], float 0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    ret float [[A]]
+; CHECK-NEXT:    ret float [[A:%.*]]
 ;
   %add = call nsz nnan float @llvm.experimental.constrained.fadd.f32(float %a, float 0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %add
@@ -306,8 +303,7 @@
 
 define <2 x float> @fold_fadd_vec_nsz_nnan_x_0_ebstrict(<2 x float> %a) #0 {
 ; CHECK-LABEL: @fold_fadd_vec_nsz_nnan_x_0_ebstrict(
-; CHECK-NEXT:    [[ADD:%.*]] = call nnan nsz <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> [[A:%.*]], <2 x float> zeroinitializer, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    ret <2 x float> [[A]]
+; CHECK-NEXT:    ret <2 x float> [[A:%.*]]
 ;
   %add = call nsz nnan <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %a, <2 x float> zeroinitializer, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x float> %add