Index: docs/LangRef.rst
===================================================================
--- docs/LangRef.rst
+++ docs/LangRef.rst
@@ -4608,13 +4608,13 @@
    int i;   // offset 0
    float f; // offset 4
  };
- 
+
  struct Outer {
    float f;              // offset 0
    double d;             // offset 4
    struct Inner inner_a; // offset 12
  };
- 
+
  void f(struct Outer* outer, struct Inner* inner, float* f, int* i, char* c) {
    outer->f = 0;           // tag0: (OuterStructTy, FloatScalarTy, 0)
    outer->inner_a.i = 0;   // tag1: (OuterStructTy, IntScalarTy, 12)
@@ -5150,10 +5150,10 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The ``invariant.group`` metadata may be attached to ``load``/``store`` instructions.
-The existence of the ``invariant.group`` metadata on the instruction tells 
-the optimizer that every ``load`` and ``store`` to the same pointer operand 
-within the same invariant group can be assumed to load or store the same 
-value (but see the ``llvm.invariant.group.barrier`` intrinsic which affects 
+The existence of the ``invariant.group`` metadata on the instruction tells
+the optimizer that every ``load`` and ``store`` to the same pointer operand
+within the same invariant group can be assumed to load or store the same
+value (but see the ``llvm.invariant.group.barrier`` intrinsic which affects
 when two pointers are considered the same).
 
 Pointers returned by bitcast or getelementptr with only zero indices are considered the same.
@@ -5166,26 +5166,26 @@
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    call void @foo(i8* %ptr)
- 
+
    %a = load i8, i8* %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change
    call void @foo(i8* %ptr)
    %b = load i8, i8* %ptr, !invariant.group !1 ; Can't assume anything, because group changed
- 
-   %newPtr = call i8* @getPointer(i8* %ptr) 
+
+   %newPtr = call i8* @getPointer(i8* %ptr)
    %c = load i8, i8* %newPtr, !invariant.group !0 ; Can't assume anything, because we only have information about %ptr
- 
+
    %unknownValue = load i8, i8* @unknownPtr
    store i8 %unknownValue, i8* %ptr, !invariant.group !0 ; Can assume that %unknownValue == 42
- 
+
    call void @foo(i8* %ptr)
    %newPtr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
    %d = load i8, i8* %newPtr2, !invariant.group !0  ; Can't step through invariant.group.barrier to get value of %ptr
- 
+
    ...
    declare void @foo(i8*)
    declare i8* @getPointer(i8*)
    declare i8* @llvm.invariant.group.barrier(i8*)
- 
+
    !0 = !{!"magic ptr"}
    !1 = !{!"other ptr"}
@@ -5194,7 +5194,7 @@
 to the SSA value of the pointer operand.
 
 .. code-block:: llvm
- 
+
   %v = load i8, i8* %x, !invariant.group !0
   ; if %x mustalias %y then we can replace the above instruction with
   %v = load i8, i8* %y
@@ -6648,9 +6648,9 @@
 
 Note that unsigned integer remainder and signed integer remainder are
 distinct operations; for signed integer remainder, use '``srem``'.
- 
+
 Taking the remainder of a division by zero is undefined behavior.
-For vectors, if any element of the divisor is zero, the operation has 
+For vectors, if any element of the divisor is zero, the operation has
 undefined behavior.
 
 Example:
@@ -6702,7 +6702,7 @@
 distinct operations; for unsigned integer remainder, use '``urem``'.
 
 Taking the remainder of a division by zero is undefined behavior.
-For vectors, if any element of the divisor is zero, the operation has 
+For vectors, if any element of the divisor is zero, the operation has
 undefined behavior.
 Overflow also leads to undefined behavior; this is a rare case, but can
 occur, for example, by taking the remainder of a 32-bit division of
@@ -7575,7 +7575,7 @@
 instructions to save cache bandwidth, such as the ``MOVNT`` instruction on
 x86.
 
-The optional ``!invariant.group`` metadata must reference a 
+The optional ``!invariant.group`` metadata must reference a
 single metadata name ``<index>``. See ``invariant.group`` metadata.
 
 Semantics:
@@ -7681,10 +7681,10 @@
 to operate on, a value to compare to the value currently at that address,
 and a new value to place at that address if the compared values are
 equal. The type of '``<cmp>``' must be an integer or pointer type whose
-bit width is a power of two greater than or equal to eight and less 
+bit width is a power of two greater than or equal to eight and less
 than or equal to a target-specific size limit. '``<cmp>``' and '``<new>``' must
-have the same type, and the type of '``<pointer>``' must be a pointer to 
-that type. If the ``cmpxchg`` is marked as ``volatile``, then the 
+have the same type, and the type of '``<pointer>``' must be a pointer to
+that type. If the ``cmpxchg`` is marked as ``volatile``, then the
 optimizer is not allowed to modify the number or order of execution of
 this ``cmpxchg`` with other :ref:`volatile operations <volatile>`.
@@ -8972,7 +8972,7 @@
 ``tail`` or ``musttail`` markers to the call. It is used to prevent tail
 call optimization from being performed on the call.
 
-#. The optional ``fast-math flags`` marker indicates that the call has one or more 
+#. The optional ``fast-math flags`` marker indicates that the call has one or more
    :ref:`fast-math flags <fastmath>`, which are optimization hints to enable
    otherwise unsafe floating-point optimizations. Fast-math flags are only valid
    for calls that return a floating-point scalar or vector type.
@@ -12714,7 +12714,7 @@
 Overview:
 """""""""
 
-The '``llvm.invariant.group.barrier``' intrinsic can be used when an invariant 
+The '``llvm.invariant.group.barrier``' intrinsic can be used when an invariant
 established by invariant.group metadata no longer holds, to obtain a new pointer
 value that does not carry the invariant information.
 
@@ -12728,7 +12728,7 @@
 Semantics:
 """"""""""
 
-Returns another pointer that aliases its argument but which is considered different 
+Returns another pointer that aliases its argument but which is considered different
 for the purposes of ``load``/``store`` ``invariant.group`` metadata.
 
 Constrained Floating Point Intrinsics
--------------------------------------
@@ -12806,7 +12806,7 @@
 Any FP exception that would have been raised by the original code must be raised
 by the transformed code, and the transformed code must not raise any FP
 exceptions that would not have been raised by the original code. This is the
-exception behavior argument that will be used if the code being compiled reads 
+exception behavior argument that will be used if the code being compiled reads
 the FP exception status flags, but this mode can also be used with code that
 unmasks FP exceptions.
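As a concrete illustration of the calling convention described in this section (a minimal sketch; the value names are illustrative and not taken from this patch), a constrained operation passes its rounding-mode and exception-behavior arguments as metadata strings:

.. code-block:: llvm

  ; A constrained add under a dynamic (unknown) rounding mode with strict
  ; exception semantics; the optimizer may not constant-fold or reassociate it.
  %sum = call double @llvm.experimental.constrained.fadd.f64(
                                        double %a, double %b,
                                        metadata !"round.dynamic",
                                        metadata !"fpexcept.strict")

  declare double @llvm.experimental.constrained.fadd.f64(double, double,
                                                         metadata, metadata)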
@@ -12824,7 +12824,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.fadd(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -12861,7 +12861,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.fsub(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -12898,7 +12898,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.fmul(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -12935,7 +12935,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.fdiv(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -12972,7 +12972,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.frem(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -13001,8 +13001,46 @@
 
 The value produced is the floating point remainder from the division of the
 two value operands and has the same type as the operands. The remainder has the
-same sign as the dividend. 
+same sign as the dividend.
+
+'``llvm.experimental.constrained.fma``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare <type>
+      @llvm.experimental.constrained.fma(<type> <op1>, <type> <op2>, <type> <op3>,
+                                         metadata <rounding mode>,
+                                         metadata <exception behavior>)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.constrained.fma``' intrinsic returns the result of
+a fused multiply-add operation: the product of its first two operands added
+to its third operand, with no rounding of the intermediate product.
+
+Arguments:
+""""""""""
+
+The first three arguments to the '``llvm.experimental.constrained.fma``'
+intrinsic must be :ref:`floating point <t_floating>` or :ref:`vector
+<t_vector>` of floating point values. All three arguments must have
+identical types.
+
+The fourth and fifth arguments specify the rounding mode and exception
+behavior as described above. The rounding mode governs the single rounding
+applied to the final result.
+
+Semantics:
+""""""""""
+
+The value produced is the product of the first two operands added to the
+third operand, computed as if to infinite precision and rounded once to the
+result type. The result has the same type as the operands.
 
 Constrained libm-equivalent Intrinsics
 --------------------------------------
@@ -13026,7 +13064,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.sqrt(<type> <op>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -13063,7 +13101,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.pow(<type> <op1>, <type> <op2>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)
@@ -13100,7 +13138,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.powi(<type> <op>, i32 <power>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -13139,7 +13177,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.sin(<type> <op>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)
@@ -13175,7 +13213,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.cos(<type> <op>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)
@@ -13211,7 +13249,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.exp(<type> <op>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)
@@ -13246,7 +13284,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.exp2(<type> <op>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -13282,7 +13320,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.log(<type> <op>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)
@@ -13318,7 +13356,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.log10(<type> <op>,
                                            metadata <rounding mode>,
                                            metadata <exception behavior>)
@@ -13353,7 +13391,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.log2(<type> <op>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -13388,7 +13426,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.rint(<type> <op>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)
@@ -13427,7 +13465,7 @@
 
 ::
 
-      declare <type> 
+      declare <type>
       @llvm.experimental.constrained.nearbyint(<type> <op>,
                                                metadata <rounding mode>,
                                                metadata <exception behavior>)
@@ -14167,7 +14205,7 @@
 memory from the source location to the destination location. These locations
 are not allowed to overlap.
 The memory copy is performed as a sequence of load/store operations where each
 access is guaranteed to be a multiple of ``element_size`` bytes wide and
-aligned at an ``element_size`` boundary. 
+aligned at an ``element_size`` boundary.
 
 The order of the copy is unspecified. The same value may be read from the source
 buffer many times, but only one write is issued to the destination buffer per
@@ -14242,7 +14280,7 @@
 of memory from the source location to the destination location. These locations
 are allowed to overlap. The memory copy is performed as a sequence of load/store
 operations where each access is guaranteed to be a multiple of ``element_size``
-bytes wide and aligned at an ``element_size`` boundary. 
+bytes wide and aligned at an ``element_size`` boundary.
 
 The order of the copy is unspecified. The same value may be read from the source
 buffer many times, but only one write is issued to the destination buffer per
@@ -14317,7 +14355,7 @@
 The '``llvm.memset.element.unordered.atomic.*``' intrinsic sets the ``len`` bytes of
 memory starting at the destination location to the given ``value``. The memory is
 set with a sequence of store operations where each access is guaranteed to be a
-multiple of ``element_size`` bytes wide and aligned at an ``element_size`` boundary. 
+multiple of ``element_size`` bytes wide and aligned at an ``element_size`` boundary.
 
 The order of the assignment is unspecified. Only one write is issued to the
 destination buffer per element. It is well defined to have concurrent reads and
Index: include/llvm/CodeGen/ISDOpcodes.h
===================================================================
--- include/llvm/CodeGen/ISDOpcodes.h
+++ include/llvm/CodeGen/ISDOpcodes.h
@@ -263,7 +263,7 @@
     /// They are used to limit optimizations while the DAG is being
     /// optimized.
     STRICT_FADD, STRICT_FSUB, STRICT_FMUL, STRICT_FDIV, STRICT_FREM,
- 
+    STRICT_FMA,
 
     /// Constrained versions of libm-equivalent floating point intrinsics.
     /// These will be lowered to the equivalent non-constrained pseudo-op
     /// (or expanded to the equivalent library call) before final selection.
Index: include/llvm/CodeGen/SelectionDAGNodes.h
===================================================================
--- include/llvm/CodeGen/SelectionDAGNodes.h
+++ include/llvm/CodeGen/SelectionDAGNodes.h
@@ -623,13 +623,14 @@
   /// Test if this node is a strict floating point pseudo-op.
   bool isStrictFPOpcode() {
     switch (NodeType) {
-      default: 
+      default:
         return false;
       case ISD::STRICT_FADD:
       case ISD::STRICT_FSUB:
       case ISD::STRICT_FMUL:
       case ISD::STRICT_FDIV:
       case ISD::STRICT_FREM:
+      case ISD::STRICT_FMA:
       case ISD::STRICT_FSQRT:
       case ISD::STRICT_FPOW:
       case ISD::STRICT_FPOWI:
Index: include/llvm/IR/IntrinsicInst.h
===================================================================
--- include/llvm/IR/IntrinsicInst.h
+++ include/llvm/IR/IntrinsicInst.h
@@ -167,6 +167,7 @@
     };
 
     bool isUnaryOp() const;
+    bool isTernaryOp() const;
     RoundingMode getRoundingMode() const;
     ExceptionBehavior getExceptionBehavior() const;
 
@@ -178,6 +179,7 @@
       case Intrinsic::experimental_constrained_fmul:
       case Intrinsic::experimental_constrained_fdiv:
       case Intrinsic::experimental_constrained_frem:
+      case Intrinsic::experimental_constrained_fma:
       case Intrinsic::experimental_constrained_sqrt:
       case Intrinsic::experimental_constrained_pow:
      case Intrinsic::experimental_constrained_powi:
Index: include/llvm/IR/Intrinsics.td
===================================================================
--- include/llvm/IR/Intrinsics.td
+++ include/llvm/IR/Intrinsics.td
@@ -490,6 +490,13 @@
                                                  llvm_metadata_ty,
                                                  llvm_metadata_ty ]>;
 
+  def int_experimental_constrained_fma : Intrinsic<[ llvm_anyfloat_ty ],
+                                                   [ LLVMMatchType<0>,
+                                                     LLVMMatchType<0>,
+                                                     LLVMMatchType<0>,
+                                                     llvm_metadata_ty,
+                                                     llvm_metadata_ty ]>;
+
   // These intrinsics are sensitive to the rounding mode so we need constrained
   // versions of each of them. When strict rounding and exception control are
   // not required the non-constrained versions of these intrinsics should be
Index: lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -907,6 +907,7 @@
   case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
   case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
   case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
+  case ISD::STRICT_FMA: EqOpc = ISD::FMA; break;
   case ISD::STRICT_FSIN: EqOpc = ISD::FSIN; break;
   case ISD::STRICT_FCOS: EqOpc = ISD::FCOS; break;
   case ISD::STRICT_FEXP: EqOpc = ISD::FEXP; break;
@@ -1072,6 +1073,7 @@
     }
     break;
   case ISD::STRICT_FSQRT:
+  case ISD::STRICT_FMA:
   case ISD::STRICT_FPOW:
   case ISD::STRICT_FPOWI:
   case ISD::STRICT_FSIN:
@@ -1240,7 +1242,7 @@
     // If the index is dependent on the store we will introduce a cycle when
     // creating the load (the load uses the index, and by replacing the chain
     // we will make the index dependent on the load). Also, the store might be
-    // dependent on the extractelement and introduce a cycle when creating 
+    // dependent on the extractelement and introduce a cycle when creating
     // the load.
     if (SDNode::hasPredecessorHelper(ST, Visited, Worklist) ||
         ST->hasPredecessor(Op.getNode()))
@@ -4060,6 +4062,11 @@
     Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
                                       RTLIB::FMA_F80, RTLIB::FMA_F128,
                                       RTLIB::FMA_PPCF128));
+    break;
+  case ISD::STRICT_FMA:
+    Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
+                                      RTLIB::FMA_F80, RTLIB::FMA_F128,
+                                      RTLIB::FMA_PPCF128));
     break;
   case ISD::FADD:
     Results.push_back(ExpandFPLibCall(Node, RTLIB::ADD_F32, RTLIB::ADD_F64,
Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6640,6 +6640,7 @@
   unsigned OrigOpc = Node->getOpcode();
   unsigned NewOpc;
   bool IsUnary = false;
+  bool IsTernary = false;
   switch (OrigOpc) {
   default:
     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
@@ -6648,6 +6649,7 @@
   case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break;
   case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break;
   case ISD::STRICT_FREM: NewOpc = ISD::FREM; break;
+  case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break;
   case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break;
   case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break;
   case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break;
@@ -6674,6 +6676,10 @@
   SDNode *Res = nullptr;
   if (IsUnary)
     Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) });
+  else if (IsTernary)
+    Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
+                                           Node->getOperand(2),
+                                           Node->getOperand(3) });
   else
     Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
                                            Node->getOperand(2) });
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5432,6 +5432,7 @@
   case Intrinsic::experimental_constrained_fmul:
   case Intrinsic::experimental_constrained_fdiv:
   case Intrinsic::experimental_constrained_frem:
+  case Intrinsic::experimental_constrained_fma:
   case Intrinsic::experimental_constrained_sqrt:
   case Intrinsic::experimental_constrained_pow:
   case Intrinsic::experimental_constrained_powi:
@@ -5963,6 +5964,9 @@
   case Intrinsic::experimental_constrained_frem:
     Opcode = ISD::STRICT_FREM;
     break;
+  case Intrinsic::experimental_constrained_fma:
+    Opcode = ISD::STRICT_FMA;
+    break;
   case Intrinsic::experimental_constrained_sqrt:
     Opcode = ISD::STRICT_FSQRT;
     break;
@@ -6009,10 +6013,15 @@
   SDVTList VTs = DAG.getVTList(ValueVTs);
   SDValue Result;
   if (FPI.isUnaryOp())
-    Result = DAG.getNode(Opcode, sdl, VTs, 
+    Result = DAG.getNode(Opcode, sdl, VTs,
                          { Chain, getValue(FPI.getArgOperand(0)) });
+  else if (FPI.isTernaryOp())
+    Result = DAG.getNode(Opcode, sdl, VTs,
+                         { Chain, getValue(FPI.getArgOperand(0)),
+                           getValue(FPI.getArgOperand(1)),
+                           getValue(FPI.getArgOperand(2)) });
   else
-    Result = DAG.getNode(Opcode, sdl, VTs, 
+    Result = DAG.getNode(Opcode, sdl, VTs,
                          { Chain, getValue(FPI.getArgOperand(0)),
                            getValue(FPI.getArgOperand(1)) });
Index: lib/IR/IntrinsicInst.cpp
===================================================================
--- lib/IR/IntrinsicInst.cpp
+++ lib/IR/IntrinsicInst.cpp
@@ -14,10 +14,10 @@
 // are all subclasses of the CallInst class. Note that none of these classes
 // has state or virtual methods, which is an important part of this gross/neat
 // hack working.
-// 
+//
 // In some cases, arguments to intrinsics need to be generic and are defined as
 // type pointer to empty struct { }*. To access the real item of interest the
-// cast instruction needs to be stripped away. 
+// cast instruction needs to be stripped away.
 //
 //===----------------------------------------------------------------------===//
 
@@ -98,7 +98,7 @@
 ConstrainedFPIntrinsic::RoundingMode
 ConstrainedFPIntrinsic::getRoundingMode() const {
   unsigned NumOperands = getNumArgOperands();
-  Metadata *MD = 
+  Metadata *MD =
       dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2))->getMetadata();
   if (!MD || !isa<MDString>(MD))
     return rmInvalid;
@@ -118,7 +118,7 @@
 ConstrainedFPIntrinsic::ExceptionBehavior
 ConstrainedFPIntrinsic::getExceptionBehavior() const {
   unsigned NumOperands = getNumArgOperands();
-  Metadata *MD = 
+  Metadata *MD =
       dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1))->getMetadata();
   if (!MD || !isa<MDString>(MD))
     return ebInvalid;
@@ -132,7 +132,7 @@
 
 bool ConstrainedFPIntrinsic::isUnaryOp() const {
   switch (getIntrinsicID()) {
-    default: 
+    default:
       return false;
     case Intrinsic::experimental_constrained_sqrt:
     case Intrinsic::experimental_constrained_sin:
@@ -147,3 +147,13 @@
       return true;
   }
 }
+
+bool ConstrainedFPIntrinsic::isTernaryOp() const {
+  switch (getIntrinsicID()) {
+    default:
+      return false;
+    case Intrinsic::experimental_constrained_fma:
+      return true;
+  }
+}
Index: lib/IR/Verifier.cpp
===================================================================
--- lib/IR/Verifier.cpp
+++ lib/IR/Verifier.cpp
@@ -3969,6 +3969,7 @@
   case Intrinsic::experimental_constrained_fmul:
   case Intrinsic::experimental_constrained_fdiv:
   case Intrinsic::experimental_constrained_frem:
+  case Intrinsic::experimental_constrained_fma:
   case Intrinsic::experimental_constrained_sqrt:
   case Intrinsic::experimental_constrained_pow:
   case Intrinsic::experimental_constrained_powi:
@@ -4429,8 +4430,9 @@
 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
   unsigned NumOperands = FPI.getNumArgOperands();
-  Assert(((NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
-         "invalid arguments for constrained FP intrinsic", &FPI);
+  Assert(((NumOperands == 5 && FPI.isTernaryOp()) ||
+          (NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
+         "invalid arguments for constrained FP intrinsic", &FPI);
   Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)),
          "invalid exception behavior argument", &FPI);
   Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)),
Index: lib/Target/X86/X86ISelDAGToDAG.cpp
===================================================================
--- lib/Target/X86/X86ISelDAGToDAG.cpp
+++ lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2012,6 +2012,14 @@
   switch (Opcode) {
   default: break;
+  case ISD::FMA: {
+    SDValue ISDFMA = CurDAG->getNode(
+        X86ISD::FMADD, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
+        Node->getOperand(1), Node->getOperand(2));
+    ReplaceUses(SDValue(Node, 0), ISDFMA);
+    SelectCode(ISDFMA.getNode());
+    return;
+  }
   case ISD::BRIND: {
     if (Subtarget->isTargetNaCl())
       // NaCl has its own pass where jmp %r32 are converted to jmp %r64. We
Index: test/CodeGen/X86/fp-intrinsics.ll
===================================================================
--- test/CodeGen/X86/fp-intrinsics.ll
+++ test/CodeGen/X86/fp-intrinsics.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck --check-prefix=FMA64 %s
 
 ; Verify that constants aren't folded to inexact results when the rounding mode
 ; is unknown.
@@ -81,7 +82,7 @@
 ;   return a;
 ; }
 ;
-; 
+;
 ; CHECK-LABEL: f4:
 ; CHECK: testl
 ; CHECK: jle
@@ -239,6 +240,22 @@
   ret double %result
 }
 
+; Verify that fma(42.1, 42.1, 42.1) isn't simplified when the rounding mode is
+; unknown.
+; CHECK-LABEL: f17
+; CHECK: jmp fma # TAILCALL
+; FMA64: vfmadd213sd
+define double @f17() {
+entry:
+  %result = call double @llvm.experimental.constrained.fma.f64(
+                                               double 42.1,
+                                               double 42.1,
+                                               double 42.1,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+  ret double %result
+}
+
 @llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
 declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
 declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
@@ -256,3 +273,4 @@
 declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
Index: test/Feature/fp-intrinsics.ll
===================================================================
--- test/Feature/fp-intrinsics.ll
+++ test/Feature/fp-intrinsics.ll
@@ -73,7 +73,7 @@
 ;   return a;
 ; }
 ;
-; 
+;
 ; CHECK-LABEL: @f4
 ; CHECK-NOT: select
 ; CHECK: br i1 %cmp
@@ -94,7 +94,6 @@
   ret double %a.0
 }
 
-
 ; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
 ; CHECK-LABEL: f5
 ; CHECK: call double @llvm.experimental.constrained.sqrt
@@ -231,6 +230,18 @@
   ret double %result
 }
 
+; Verify that fma(42.1, 42.1, 42.1) isn't simplified when the rounding mode is
+; unknown.
+; CHECK-LABEL: f17
+; CHECK: call double @llvm.experimental.constrained.fma
+define double @f17() {
+entry:
+  %result = call double @llvm.experimental.constrained.fma.f64(double 42.1, double 42.1, double 42.1,
+                                                               metadata !"round.dynamic",
+                                                               metadata !"fpexcept.strict")
+  ret double %result
+}
+
 @llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
 declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
 declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
@@ -248,3 +259,4 @@
 declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
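To show how the new intrinsic composes end to end (a minimal sketch; the function and value names are hypothetical, not part of this patch), a front end that must preserve a dynamic rounding mode for a fused ``a*b + c`` would emit:

.. code-block:: llvm

  ; The fused multiply-add may not be constant-folded or split into a separate
  ; multiply and add, because the rounding mode is dynamic and exception
  ; semantics are strict.
  define double @fused_madd(double %a, double %b, double %c) {
  entry:
    %r = call double @llvm.experimental.constrained.fma.f64(
                                          double %a, double %b, double %c,
                                          metadata !"round.dynamic",
                                          metadata !"fpexcept.strict")
    ret double %r
  }

  declare double @llvm.experimental.constrained.fma.f64(double, double, double,
                                                        metadata, metadata)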