Index: clang/lib/CodeGen/CGExpr.cpp
===================================================================
--- clang/lib/CodeGen/CGExpr.cpp
+++ clang/lib/CodeGen/CGExpr.cpp
@@ -187,6 +187,7 @@
 
   QualType BoolTy = getContext().BoolTy;
   SourceLocation Loc = E->getExprLoc();
+  CGFPOptionsRAII FPOptsRAII(*this, E);
   if (!E->getType()->isAnyComplexType())
     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
 
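A minimal C illustration (my own sketch, not part of the patch) of the path this new guard covers, assuming the file is compiled with -ffp-exception-behavior=maytrap as in the tests below:

#pragma float_control(except, on)
int is_nonzero(float x) {
  /* The if-condition goes through EvaluateExprAsBool; with the guard above,
     the constrained fcmp of x against zero takes its exception behavior from
     the pragma rather than from the command-line default. */
  if (x)
    return 1;
  return 0;
}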
Index: clang/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- clang/lib/CodeGen/CGExprScalar.cpp
+++ clang/lib/CodeGen/CGExprScalar.cpp
@@ -2242,9 +2242,11 @@
   case CK_FloatingToIntegral:
   case CK_FloatingCast:
   case CK_FixedPointToFloating:
-  case CK_FloatingToFixedPoint:
+  case CK_FloatingToFixedPoint: {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                 CE->getExprLoc());
+  }
   case CK_BooleanToSignedIntegral: {
     ScalarConversionOpts Opts;
     Opts.TreatBooleanAsSigned = true;
@@ -2255,8 +2257,10 @@
     return EmitIntToBoolConversion(Visit(E));
   case CK_PointerToBoolean:
     return EmitPointerToBoolConversion(Visit(E), E->getType());
-  case CK_FloatingToBoolean:
+  case CK_FloatingToBoolean: {
+    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
     return EmitFloatToBoolConversion(Visit(E));
+  }
   case CK_MemberPointerToBoolean: {
     llvm::Value *MemPtr = Visit(E);
     const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
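A small C sketch (mine, not from the patch) of the conversions the two new scopes above affect, again assuming -ffp-exception-behavior=maytrap on the command line:

#pragma float_control(except, on)
int narrow(double d) {
  /* CK_FloatingToIntegral: the constrained fptosi emitted by
     EmitScalarConversion now carries the pragma's exception behavior. */
  return (int)d;
}

_Bool nonzero(double d) {
  /* CK_FloatingToBoolean: likewise for the constrained fcmp against zero
     emitted by EmitFloatToBoolConversion. */
  return d;
}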
Index: clang/lib/CodeGen/CodeGenFunction.h
===================================================================
--- clang/lib/CodeGen/CodeGenFunction.h
+++ clang/lib/CodeGen/CodeGenFunction.h
@@ -608,11 +608,15 @@
   class CGFPOptionsRAII {
   public:
     CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
+    CGFPOptionsRAII(CodeGenFunction &CGF, const Expr *E);
     ~CGFPOptionsRAII();
 
   private:
+    void ConstructorHelper(FPOptions FPFeatures);
     CodeGenFunction &CGF;
     FPOptions OldFPFeatures;
+    llvm::fp::ExceptionBehavior OldExcept;
+    llvm::RoundingMode OldRounding;
     Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
   };
   FPOptions CurFPFeatures;
Index: clang/lib/CodeGen/CodeGenFunction.cpp
===================================================================
--- clang/lib/CodeGen/CodeGenFunction.cpp
+++ clang/lib/CodeGen/CodeGenFunction.cpp
@@ -25,6 +25,7 @@
 #include "clang/AST/Attr.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
 #include "clang/AST/StmtCXX.h"
 #include "clang/AST/StmtObjC.h"
 #include "clang/Basic/Builtins.h"
@@ -131,10 +132,24 @@
 }
 
 CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
+                                                  const Expr *E)
+    : CGF(CGF) {
+  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
+}
+
+CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                   FPOptions FPFeatures)
-    : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) {
+    : CGF(CGF) {
+  ConstructorHelper(FPFeatures);
+}
+
+void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
+  OldFPFeatures = CGF.CurFPFeatures;
   CGF.CurFPFeatures = FPFeatures;
 
+  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
+  OldRounding = CGF.Builder.getDefaultConstrainedRounding();
+
   if (OldFPFeatures == FPFeatures)
     return;
 
@@ -175,6 +190,8 @@
 
 CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
   CGF.CurFPFeatures = OldFPFeatures;
+  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
+  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
 }
 
 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
Index: clang/lib/Sema/SemaExpr.cpp
===================================================================
--- clang/lib/Sema/SemaExpr.cpp
+++ clang/lib/Sema/SemaExpr.cpp
@@ -697,7 +697,7 @@
   //   If T is cv std::nullptr_t, the result is a null pointer constant.
   CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue;
   Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_RValue,
-                                 FPOptionsOverride());
+                                 CurFPFeatureOverrides());
 
   // C11 6.3.2.1p2:
   //   ... if the lvalue has atomic type, the value has the non-atomic version
Index: clang/test/CodeGen/aarch64-v8.2a-neon-intrinsics-constrained.c
===================================================================
--- clang/test/CodeGen/aarch64-v8.2a-neon-intrinsics-constrained.c
+++ clang/test/CodeGen/aarch64-v8.2a-neon-intrinsics-constrained.c
@@ -3,7 +3,7 @@
 // RUN: | opt -S -mem2reg \
 // RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
-// RUN: -ffp-exception-behavior=strict \
+// RUN: -ffp-exception-behavior=maytrap -DEXCEPT=1 \
 // RUN: -fexperimental-strict-floating-point \
 // RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
 // RUN: | opt -S -mem2reg \
@@ -13,19 +13,27 @@
 // RUN: | opt -S -mem2reg | llc -o=- - \
 // RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
-// RUN: -ffp-exception-behavior=strict \
+// RUN: -ffp-exception-behavior=maytrap -DEXCEPT=1 \
 // RUN: -fexperimental-strict-floating-point \
 // RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
 // RUN: | opt -S -mem2reg | llc -o=- - \
 // RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
-
+//
 // REQUIRES: aarch64-registered-target
 
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
+
+#if EXCEPT
+#pragma float_control(except, on)
+#endif
+
 #include <arm_neon.h>
 
 // COMMON-LABEL: test_vsqrt_f16
 // UNCONSTRAINED:  [[SQR:%.*]] = call <4 x half> @llvm.sqrt.v4f16(<4 x half> %a)
-// CONSTRAINED:    [[SQR:%.*]] = call <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:    [[SQR:%.*]] = call <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:      fsqrt v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
 // COMMONIR:       ret <4 x half> [[SQR]]
 float16x4_t test_vsqrt_f16(float16x4_t a) {
@@ -34,7 +42,7 @@
 
 // COMMON-LABEL: test_vsqrtq_f16
 // UNCONSTRAINED:  [[SQR:%.*]] = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a)
-// CONSTRAINED:    [[SQR:%.*]] = call <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:    [[SQR:%.*]] = call <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:      fsqrt v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
 // COMMONIR:       ret <8 x half> [[SQR]]
 float16x8_t test_vsqrtq_f16(float16x8_t a) {
@@ -43,7 +51,7 @@
 
 // COMMON-LABEL: test_vfma_f16
 // UNCONSTRAINED:  [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a)
-// CONSTRAINED:    [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:    [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:      fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
 // COMMONIR:       ret <4 x half> [[ADD]]
 float16x4_t test_vfma_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -52,7 +60,7 @@
 
 // COMMON-LABEL: test_vfmaq_f16
 // UNCONSTRAINED:  [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a)
-// CONSTRAINED:    [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:    [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:      fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
 // COMMONIR:       ret <8 x half> [[ADD]]
 float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -63,7 +71,7 @@
 // COMMONIR:       [[SUB:%.*]] = fneg <4 x half> %b
 // CHECK-ASM:      fneg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
 // UNCONSTRAINED:  [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a)
-// CONSTRAINED:    [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:    [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:      fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
 // COMMONIR:       ret <4 x half> [[ADD]]
 float16x4_t test_vfms_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -74,7 +82,7 @@
 // COMMONIR:       [[SUB:%.*]] = fneg <8 x half> %b
 // CHECK-ASM:      fneg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
 // UNCONSTRAINED:  [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a)
-// CONSTRAINED:    [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:    [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:      fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
 // COMMONIR:       ret <8 x half> [[ADD]]
 float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -90,7 +98,7 @@
 // COMMONIR:      [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
 // COMMONIR:      [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
 // UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
-// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <4 x half> [[FMLA]]
 float16x4_t test_vfma_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -106,7 +114,7 @@
 // COMMONIR:      [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
 // COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
 // UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
-// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <8 x half> [[FMLA]]
 float16x8_t test_vfmaq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
@@ -122,7 +130,7 @@
 // COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
 // COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
 // UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
-// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <4 x half> [[FMLA]]
 float16x4_t test_vfma_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
@@ -138,7 +146,7 @@
 // COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
 // COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
 // UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
-// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <8 x half> [[FMLA]]
 float16x8_t test_vfmaq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -151,7 +159,7 @@
 // COMMONIR:      [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
 // COMMONIR:      [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
 // UNCONSTRAINED: [[FMA:%.*]]  = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <4 x half> [[FMA]]
 float16x4_t test_vfma_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
@@ -168,7 +176,7 @@
 // COMMONIR:      [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
 // COMMONIR:      [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
 // UNCONSTRAINED: [[FMA:%.*]]  = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <8 x half> [[FMA]]
 float16x8_t test_vfmaq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
@@ -178,7 +186,7 @@
 // COMMON-LABEL: test_vfmah_lane_f16
 // COMMONIR:      [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
 // UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret half [[FMA]]
 float16_t test_vfmah_lane_f16(float16_t a, float16_t b, float16x4_t c) {
@@ -188,7 +196,7 @@
 // COMMON-LABEL: test_vfmah_laneq_f16
 // COMMONIR:      [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
 // UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret half [[FMA]]
 float16_t test_vfmah_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
@@ -205,7 +213,7 @@
 // COMMONIR:      [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
 // COMMONIR:      [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
 // UNCONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
-// CONSTRAINED:   [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <4 x half> [[FMA]]
 float16x4_t test_vfms_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -222,7 +230,7 @@
 // COMMONIR:      [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
 // COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
 // UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
-// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <8 x half> [[FMLA]]
 float16x8_t test_vfmsq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
@@ -240,7 +248,7 @@
 // COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
 // COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
 // UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
-// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <4 x half> [[FMLA]]
 float16x4_t test_vfms_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
@@ -258,7 +266,7 @@
 // COMMONIR:      [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
 // COMMONIR:      [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
 // UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
-// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <8 x half> [[FMLA]]
 float16x8_t test_vfmsq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -272,7 +280,7 @@
 // COMMONIR:      [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
 // COMMONIR:      [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
 // UNCONSTRAINED: [[FMA:%.*]]  = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <4 x half> [[FMA]]
 float16x4_t test_vfms_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
@@ -290,7 +298,7 @@
 // COMMONIR:      [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
 // COMMONIR:      [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
 // UNCONSTRAINED: [[FMA:%.*]]  = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret <8 x half> [[FMA]]
 float16x8_t test_vfmsq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
@@ -308,7 +316,7 @@
 // CHECK-ASM:     fcvt h{{[0-9]+}}, s{{[0-9]+}}
 // COMMONIR:      [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
 // UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret half [[FMA]]
 float16_t test_vfmsh_lane_f16(float16_t a, float16_t b, float16x4_t c) {
@@ -326,10 +334,9 @@
 // CHECK-ASM:     fcvt h{{[0-9]+}}, s{{[0-9]+}}
 // COMMONIR:      [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
 // UNCONSTRAINED: [[FMA:%.*]]  = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
-// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CONSTRAINED:   [[FMA:%.*]]  = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
 // CHECK-ASM:     fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
 // COMMONIR:      ret half [[FMA]]
 float16_t test_vfmsh_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
   return vfmsh_laneq_f16(a, b, c, 7);
 }
-
Index: clang/test/CodeGen/builtin_float_strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtin_float_strictfp.c
@@ -0,0 +1,92 @@
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-windows-pc -ffp-exception-behavior=maytrap -o - %s | FileCheck %s --check-prefixes=CHECK,FP16
+// RUN: %clang_cc1 -emit-llvm -triple ppc64-be -ffp-exception-behavior=maytrap -o - %s | FileCheck %s --check-prefixes=CHECK,NOFP16
+
+// Test to ensure that these builtins don't do the variadic promotion of float->double.
+
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
+
+#pragma float_control(except, on)
+
+// CHECK-LABEL: @test_floats
+void test_floats(float f1, float f2) {
+  (void)__builtin_isgreater(f1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isgreaterequal(f1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isless(f1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_islessequal(f1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_islessgreater(f1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"one", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isunordered(f1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"uno", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+}
+
+// CHECK-LABEL: @test_doubles
+void test_doubles(double d1, double f2) {
+  (void)__builtin_isgreater(d1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"ogt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isgreaterequal(d1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"oge", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isless(d1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"olt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_islessequal(d1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"ole", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_islessgreater(d1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"one", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isunordered(d1, f2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"uno", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+}
+
+// CHECK-LABEL: @test_half
+void test_half(__fp16 *H, __fp16 *H2) {
+  (void)__builtin_isgreater(*H, *H2);
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isinf(*H);
+  // NOFP16: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.maytrap")
+  // FP16: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH7C00, metadata !"oeq", metadata !"fpexcept.maytrap")
+}
+
+// CHECK-LABEL: @test_mixed
+void test_mixed(double d1, float f2) {
+  (void)__builtin_isgreater(d1, f2);
+  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ogt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isgreaterequal(d1, f2);
+  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"oge", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isless(d1, f2);
+  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"olt", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_islessequal(d1, f2);
+  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ole", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_islessgreater(d1, f2);
+  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"one", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+  (void)__builtin_isunordered(d1, f2);
+  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"uno", metadata !"fpexcept.maytrap")
+  // CHECK-NEXT: zext i1
+}
Index: clang/test/CodeGen/complex-math-strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/complex-math-strictfp.c
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 %s -ffp-exception-behavior=maytrap -O0 -emit-llvm -triple x86_64-unknown-unknown -o - | FileCheck %s --check-prefix=X86
+
+// Test proper setting of constrained metadata in sub and div.
+//
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+
+#pragma float_control(except, on)
+
+float _Complex sub_float_rr(float a, float b) {
+  // X86-LABEL: @sub_float_rr(
+  // X86: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // X86-NOT: fsub
+  // X86: ret
+  return a - b;
+}
+
+float _Complex div_float_rr(float a, float b) {
+  // X86-LABEL: @div_float_rr(
+  // X86: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // X86-NOT: fdiv
+  // X86: ret
+  return a / b;
+}
Index: clang/test/CodeGen/complex-strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/complex-strictfp.c
@@ -0,0 +1,563 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -ffp-exception-behavior=maytrap -DEXCEPT=1 -emit-llvm -o - %s | FileCheck %s
+
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+
+#pragma float_control(except, on)
+
+// CHECK-LABEL: @main(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
+// CHECK-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
+// CHECK-NEXT:    store i32 0, i32* [[RETVAL]], align 4
+// CHECK-NEXT:    [[CONV:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 5, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5:#.*]]
+// CHECK-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[A]], i32 0, i32 0
+// CHECK-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[A]], i32 0, i32 1
+// CHECK-NEXT:    store double [[CONV]], double* [[A_REALP]], align 8
+// CHECK-NEXT:    store double 0.000000e+00, double* [[A_IMAGP]], align 8
+// CHECK-NEXT:    [[CONV1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 42, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[B]], i32 0, i32 1
+// CHECK-NEXT:    store double [[CONV1]], double* [[B_REALP]], align 8
+// CHECK-NEXT:    store double 0.000000e+00, double* [[B_IMAGP]], align 8
+// CHECK-NEXT:    [[A_REALP2:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[A]], i32 0, i32 0
+// CHECK-NEXT:    [[A_REAL:%.*]] = load double, double* [[A_REALP2]], align 8
+// CHECK-NEXT:    [[A_IMAGP3:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[A]], i32 0, i32 1
+// CHECK-NEXT:    [[A_IMAG:%.*]] = load double, double* [[A_IMAGP3]], align 8
+// CHECK-NEXT:    [[B_REALP4:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[B_REAL:%.*]] = load double, double* [[B_REALP4]], align 8
+// CHECK-NEXT:    [[B_IMAGP5:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[B]], i32 0, i32 1
+// CHECK-NEXT:    [[B_IMAG:%.*]] = load double, double* [[B_IMAGP5]], align 8
+// CHECK-NEXT:    [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_REAL]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_IMAG]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_IMAG]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_REAL]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[ISNAN_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[MUL_R]], double [[MUL_R]], metadata !"uno", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof !2
+// CHECK:       complex_mul_imag_nan:
+// CHECK-NEXT:    [[ISNAN_CMP6:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[MUL_I]], double [[MUL_I]], metadata !"uno", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br i1 [[ISNAN_CMP6]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof !2
+// CHECK:       complex_mul_libcall:
+// CHECK-NEXT:    [[CALL:%.*]] = call { double, double } @__muldc3(double [[A_REAL]], double [[A_IMAG]], double [[B_REAL]], double [[B_IMAG]]) [[ATTR6:#.*]]
+// CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0
+// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1
+// CHECK-NEXT:    br label [[COMPLEX_MUL_CONT]]
+// CHECK:       complex_mul_cont:
+// CHECK-NEXT:    [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], [[COMPLEX_MUL_LIBCALL]] ]
+// CHECK-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], [[COMPLEX_MUL_LIBCALL]] ]
+// CHECK-NEXT:    [[B_REALP7:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[B_REAL8:%.*]] = load double, double* [[B_REALP7]], align 8
+// CHECK-NEXT:    [[B_IMAGP9:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[B]], i32 0, i32 1
+// CHECK-NEXT:    [[B_IMAG10:%.*]] = load double, double* [[B_IMAGP9]], align 8
+// CHECK-NEXT:    [[A_REALP11:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[A]], i32 0, i32 0
+// CHECK-NEXT:    [[A_REAL12:%.*]] = load double, double* [[A_REALP11]], align 8
+// CHECK-NEXT:    [[A_IMAGP13:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[A]], i32 0, i32 1
+// CHECK-NEXT:    [[A_IMAG14:%.*]] = load double, double* [[A_IMAGP13]], align 8
+// CHECK-NEXT:    [[MUL_AC15:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_REAL8]], double [[A_REAL12]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_BD16:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_IMAG10]], double [[A_IMAG14]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_AD17:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_REAL8]], double [[A_IMAG14]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_BC18:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_IMAG10]], double [[A_REAL12]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_R19:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC15]], double [[MUL_BD16]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_I20:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD17]], double [[MUL_BC18]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[ISNAN_CMP21:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[MUL_R19]], double [[MUL_R19]], metadata !"uno", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br i1 [[ISNAN_CMP21]], label [[COMPLEX_MUL_IMAG_NAN22:%.*]], label [[COMPLEX_MUL_CONT26:%.*]], !prof !2
+// CHECK:       complex_mul_imag_nan22:
+// CHECK-NEXT:    [[ISNAN_CMP23:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[MUL_I20]], double [[MUL_I20]], metadata !"uno", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br i1 [[ISNAN_CMP23]], label [[COMPLEX_MUL_LIBCALL24:%.*]], label [[COMPLEX_MUL_CONT26]], !prof !2
+// CHECK:       complex_mul_libcall24:
+// CHECK-NEXT:    [[CALL25:%.*]] = call { double, double } @__muldc3(double [[B_REAL8]], double [[B_IMAG10]], double [[A_REAL12]], double [[A_IMAG14]]) [[ATTR6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { double, double } [[CALL25]], 0
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { double, double } [[CALL25]], 1
+// CHECK-NEXT:    br label [[COMPLEX_MUL_CONT26]]
+// CHECK:       complex_mul_cont26:
+// CHECK-NEXT:    [[REAL_MUL_PHI27:%.*]] = phi double [ [[MUL_R19]], [[COMPLEX_MUL_CONT]] ], [ [[MUL_R19]], [[COMPLEX_MUL_IMAG_NAN22]] ], [ [[TMP2]], [[COMPLEX_MUL_LIBCALL24]] ]
+// CHECK-NEXT:    [[IMAG_MUL_PHI28:%.*]] = phi double [ [[MUL_I20]], [[COMPLEX_MUL_CONT]] ], [ [[MUL_I20]], [[COMPLEX_MUL_IMAG_NAN22]] ], [ [[TMP3]], [[COMPLEX_MUL_LIBCALL24]] ]
+// CHECK-NEXT:    [[CMP_R:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[REAL_MUL_PHI]], double [[REAL_MUL_PHI27]], metadata !"une", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CMP_I:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[IMAG_MUL_PHI]], double [[IMAG_MUL_PHI28]], metadata !"une", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[OR_RI:%.*]] = or i1 [[CMP_R]], [[CMP_I]]
+// CHECK-NEXT:    [[CONV29:%.*]] = zext i1 [[OR_RI]] to i32
+// CHECK-NEXT:    ret i32 [[CONV29]]
+//
+int main(void)
+{
+  double _Complex a = 5;
+  double _Complex b = 42;
+
+  return a * b != b * a;
+}
+
+_Complex double bar(int);
+void test(_Complex double*);
+void takecomplex(_Complex double);
+
+// CHECK-LABEL: @test2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[X:%.*]] = alloca { double, double }, align 8
+// CHECK-NEXT:    [[COERCE:%.*]] = alloca { double, double }, align 8
+// CHECK-NEXT:    store i32 [[C:%.*]], i32* [[C_ADDR]], align 4
+// CHECK-NEXT:    [[CALL:%.*]] = call { double, double } @bar(i32 1) [[ATTR5]]
+// CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0
+// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1
+// CHECK-NEXT:    [[X_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[X]], i32 0, i32 0
+// CHECK-NEXT:    [[X_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[X]], i32 0, i32 1
+// CHECK-NEXT:    store double [[TMP0]], double* [[X_REALP]], align 8
+// CHECK-NEXT:    store double [[TMP1]], double* [[X_IMAGP]], align 8
+// CHECK-NEXT:    call void @test({ double, double }* [[X]]) [[ATTR5]]
+// CHECK-NEXT:    [[X_REALP1:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[X]], i32 0, i32 0
+// CHECK-NEXT:    [[X_REAL:%.*]] = load double, double* [[X_REALP1]], align 8
+// CHECK-NEXT:    [[X_IMAGP2:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[X]], i32 0, i32 1
+// CHECK-NEXT:    [[X_IMAG:%.*]] = load double, double* [[X_IMAGP2]], align 8
+// CHECK-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[COERCE]], i32 0, i32 0
+// CHECK-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[COERCE]], i32 0, i32 1
+// CHECK-NEXT:    store double [[X_REAL]], double* [[COERCE_REALP]], align 8
+// CHECK-NEXT:    store double [[X_IMAG]], double* [[COERCE_IMAGP]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[COERCE]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP3:%.*]] = load double, double* [[TMP2]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[COERCE]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP5:%.*]] = load double, double* [[TMP4]], align 8
+// CHECK-NEXT:    call void @takecomplex(double [[TMP3]], double [[TMP5]]) [[ATTR5]]
+// CHECK-NEXT:    ret void
+//
+void test2(int c) {
+  _Complex double X;
+  X = bar(1);
+  test(&X);
+  takecomplex(X);
+}
+
+_Complex double g1, g2;
+_Complex float cf;
+double D;
+
+// CHECK-LABEL: @test3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[GR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[G1_REAL:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G2_REAL:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g2, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G2_IMAG:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g2, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[ADD_R:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_REAL]], double [[G2_REAL]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[ADD_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_IMAG]], double [[G2_IMAG]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[ADD_R]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[ADD_I]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G1_REAL1:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG2:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G2_REAL3:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g2, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G2_IMAG4:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g2, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[SUB_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[G1_REAL1]], double [[G2_REAL3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[SUB_I:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[G1_IMAG2]], double [[G2_IMAG4]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[SUB_R]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[SUB_I]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G1_REAL5:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG6:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G2_REAL7:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g2, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G2_IMAG8:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g2, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[G1_REAL5]], double [[G2_REAL7]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[G1_IMAG6]], double [[G2_IMAG8]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[G1_REAL5]], double [[G2_IMAG8]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[G1_IMAG6]], double [[G2_REAL7]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[ISNAN_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[MUL_R]], double [[MUL_R]], metadata !"uno", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof !2
+// CHECK:       complex_mul_imag_nan:
+// CHECK-NEXT:    [[ISNAN_CMP9:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[MUL_I]], double [[MUL_I]], metadata !"uno", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br i1 [[ISNAN_CMP9]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof !2
+// CHECK:       complex_mul_libcall:
+// CHECK-NEXT:    [[CALL:%.*]] = call { double, double } @__muldc3(double [[G1_REAL5]], double [[G1_IMAG6]], double [[G2_REAL7]], double [[G2_IMAG8]]) [[ATTR6]]
+// CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0
+// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1
+// CHECK-NEXT:    br label [[COMPLEX_MUL_CONT]]
+// CHECK:       complex_mul_cont:
+// CHECK-NEXT:    [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], [[COMPLEX_MUL_LIBCALL]] ]
+// CHECK-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], [[COMPLEX_MUL_LIBCALL]] ]
+// CHECK-NEXT:    store double [[REAL_MUL_PHI]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[IMAG_MUL_PHI]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G1_REAL10:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG11:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[CONJ_I:%.*]] = fneg double [[G1_IMAG11]]
+// CHECK-NEXT:    [[NEG_R:%.*]] = fneg double [[G1_REAL10]]
+// CHECK-NEXT:    [[NEG_I:%.*]] = fneg double [[CONJ_I]]
+// CHECK-NEXT:    store double [[NEG_R]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[NEG_I]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[TMP2]], double* [[GR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load double, double* @D, align 8
+// CHECK-NEXT:    [[CF_REAL:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CF_IMAG:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_REAL]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CONV12:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_IMAG]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[ADD_R13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[CONV]], double [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CONV14:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[ADD_R13]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CONV15:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[CONV12]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store float [[CONV14]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
+// CHECK-NEXT:    store float [[CONV15]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CF_REAL16:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CF_IMAG17:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CONV18:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_REAL16]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CONV19:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_IMAG17]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[TMP4:%.*]] = load double, double* @D, align 8
+// CHECK-NEXT:    [[ADD_R20:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP4]], double [[CONV18]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[ADD_R20]], double* @D, align 8
+// CHECK-NEXT:    [[G1_REAL21:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG22:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[CF_REAL23:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CF_IMAG24:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CONV25:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_REAL23]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CONV26:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_IMAG24]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CALL27:%.*]] = call { double, double } @__divdc3(double [[CONV25]], double [[CONV26]], double [[G1_REAL21]], double [[G1_IMAG22]]) [[ATTR6]]
+// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { double, double } [[CALL27]], 0
+// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { double, double } [[CALL27]], 1
+// CHECK-NEXT:    [[CONV28:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CONV29:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store float [[CONV28]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
+// CHECK-NEXT:    store float [[CONV29]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[G1_REAL30:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG31:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[TMP7:%.*]] = load double, double* @D, align 8
+// CHECK-NEXT:    [[ADD_R32:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_REAL30]], double [[TMP7]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[ADD_R32]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[G1_IMAG31]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = load double, double* @D, align 8
+// CHECK-NEXT:    [[G1_REAL33:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG34:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[ADD_R35:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP8]], double [[G1_REAL33]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[ADD_R35]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[G1_IMAG34]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    ret void
+//
+void test3() {
+  g1 = g1 + g2;
+  g1 = g1 - g2;
+  g1 = g1 * g2;
+  g1 = +-~g1;
+
+  double Gr = __real g1;
+
+  cf += D;
+  D += cf;
+  cf /= g1;
+  g1 = g1 + D;
+  g1 = D + g1;
+}
+
+__complex__ int ci1, ci2;
+__complex__ short cs;
+int i;
+// CHECK-LABEL: @test3int(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CI1_REAL:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI2_REAL:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci2, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI2_IMAG:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci2, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[ADD_R:%.*]] = add i32 [[CI1_REAL]], [[CI2_REAL]]
+// CHECK-NEXT:    [[ADD_I:%.*]] = add i32 [[CI1_IMAG]], [[CI2_IMAG]]
+// CHECK-NEXT:    store i32 [[ADD_R]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[ADD_I]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI1_REAL1:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG2:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI2_REAL3:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci2, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI2_IMAG4:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci2, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[SUB_R:%.*]] = sub i32 [[CI1_REAL1]], [[CI2_REAL3]]
+// CHECK-NEXT:    [[SUB_I:%.*]] = sub i32 [[CI1_IMAG2]], [[CI2_IMAG4]]
+// CHECK-NEXT:    store i32 [[SUB_R]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[SUB_I]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI1_REAL5:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG6:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI2_REAL7:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci2, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI2_IMAG8:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci2, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[MUL_RL:%.*]] = mul i32 [[CI1_REAL5]], [[CI2_REAL7]]
+// CHECK-NEXT:    [[MUL_RR:%.*]] = mul i32 [[CI1_IMAG6]], [[CI2_IMAG8]]
+// CHECK-NEXT:    [[MUL_R:%.*]] = sub i32 [[MUL_RL]], [[MUL_RR]]
+// CHECK-NEXT:    [[MUL_IL:%.*]] = mul i32 [[CI1_IMAG6]], [[CI2_REAL7]]
+// CHECK-NEXT:    [[MUL_IR:%.*]] = mul i32 [[CI1_REAL5]], [[CI2_IMAG8]]
+// CHECK-NEXT:    [[MUL_I:%.*]] = add i32 [[MUL_IL]], [[MUL_IR]]
+// CHECK-NEXT:    store i32 [[MUL_R]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[MUL_I]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI1_REAL9:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG10:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CONJ_I:%.*]] = sub i32 0, [[CI1_IMAG10]]
+// CHECK-NEXT:    [[NEG_R:%.*]] = sub i32 0, [[CI1_REAL9]]
+// CHECK-NEXT:    [[NEG_I:%.*]] = sub i32 0, [[CONJ_I]]
+// CHECK-NEXT:    store i32 [[NEG_R]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[NEG_I]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[TMP0]], i32* @i, align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @i, align 4
+// CHECK-NEXT:    [[CS_REAL:%.*]] = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 0), align 2
+// CHECK-NEXT:    [[CS_IMAG:%.*]] = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 1), align 2
+// CHECK-NEXT:    [[CONV:%.*]] = sext i16 [[CS_REAL]] to i32
+// CHECK-NEXT:    [[CONV11:%.*]] = sext i16 [[CS_IMAG]] to i32
+// CHECK-NEXT:    [[ADD_R12:%.*]] = add i32 [[CONV]], [[TMP1]]
+// CHECK-NEXT:    [[ADD_I13:%.*]] = add i32 [[CONV11]], 0
+// CHECK-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD_R12]] to i16
+// CHECK-NEXT:    [[CONV15:%.*]] = trunc i32 [[ADD_I13]] to i16
+// CHECK-NEXT:    store i16 [[CONV14]], i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 0), align 2
+// CHECK-NEXT:    store i16 [[CONV15]], i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 1), align 2
+// CHECK-NEXT:    [[CF_REAL:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CF_IMAG:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CONV16:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_REAL]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[CONV17:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_IMAG]], metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load double, double* @D, align 8
+// CHECK-NEXT:    [[ADD_R18:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP2]], double [[CONV16]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[ADD_R18]], double* @D, align 8
+// CHECK-NEXT:    [[CI1_REAL19:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG20:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CS_REAL21:%.*]] = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 0), align 2
+// CHECK-NEXT:    [[CS_IMAG22:%.*]] = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 1), align 2
+// CHECK-NEXT:    [[CONV23:%.*]] = sext i16 [[CS_REAL21]] to i32
+// CHECK-NEXT:    [[CONV24:%.*]] = sext i16 [[CS_IMAG22]] to i32
+// CHECK-NEXT:    [[TMP3:%.*]] = mul i32 [[CONV23]], [[CI1_REAL19]]
+// CHECK-NEXT:    [[TMP4:%.*]] = mul i32 [[CONV24]], [[CI1_IMAG20]]
+// CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]]
+// CHECK-NEXT:    [[TMP6:%.*]] = mul i32 [[CI1_REAL19]], [[CI1_REAL19]]
+// CHECK-NEXT:    [[TMP7:%.*]] = mul i32 [[CI1_IMAG20]], [[CI1_IMAG20]]
+// CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP6]], [[TMP7]]
+// CHECK-NEXT:    [[TMP9:%.*]] = mul i32 [[CONV24]], [[CI1_REAL19]]
+// CHECK-NEXT:    [[TMP10:%.*]] = mul i32 [[CONV23]], [[CI1_IMAG20]]
+// CHECK-NEXT:    [[TMP11:%.*]] = sub i32 [[TMP9]], [[TMP10]]
+// CHECK-NEXT:    [[TMP12:%.*]] = sdiv i32 [[TMP5]], [[TMP8]]
+// CHECK-NEXT:    [[TMP13:%.*]] = sdiv i32 [[TMP11]], [[TMP8]]
+// CHECK-NEXT:    [[CONV25:%.*]] = trunc i32 [[TMP12]] to i16
+// CHECK-NEXT:    [[CONV26:%.*]] = trunc i32 [[TMP13]] to i16
+// CHECK-NEXT:    store i16 [[CONV25]], i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 0), align 2
+// CHECK-NEXT:    store i16 [[CONV26]], i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @cs, i32 0, i32 1), align 2
+// CHECK-NEXT:    [[CI1_REAL27:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG28:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* @i, align 4
+// CHECK-NEXT:    [[ADD_R29:%.*]] = add i32 [[CI1_REAL27]], [[TMP14]]
+// CHECK-NEXT:    [[ADD_I30:%.*]] = add i32 [[CI1_IMAG28]], 0
+// CHECK-NEXT:    store i32 [[ADD_R29]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[ADD_I30]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[TMP15:%.*]] = load i32, i32* @i, align 4
+// CHECK-NEXT:    [[CI1_REAL31:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG32:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[ADD_R33:%.*]] = add i32 [[TMP15]], [[CI1_REAL31]]
+// CHECK-NEXT:    [[ADD_I34:%.*]] = add i32 0, [[CI1_IMAG32]]
+// CHECK-NEXT:    store i32 [[ADD_R33]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[ADD_I34]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    ret void
+//
+void test3int() {
+  ci1 = ci1 + ci2;
+  ci1 = ci1 - ci2;
+  ci1 = ci1 * ci2;
+  ci1 = +-~ci1;
+
+  i = __real ci1;
+
+  cs += i;
+  D += cf;
+  cs /= ci1;
+  ci1 = ci1 + i;
+  ci1 = i + ci1;
+}
+
+// CHECK-LABEL: @t1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double 4.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store float [[CONV]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
+// CHECK-NEXT:    ret void
+//
+void t1() {
+  (__real__ cf) = 4.0;
+}
+
+// CHECK-LABEL: @t2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double 4.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store float [[CONV]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+// CHECK-NEXT:    ret void
+//
+void t2() {
+  (__imag__ cf) = 4.0;
+}
+
+// PR1960
+// CHECK-LABEL: @t3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[V:%.*]] = alloca { i64, i64 }, align 8
+// CHECK-NEXT:    [[V_REALP:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[V]], i32 0, i32 0
+// CHECK-NEXT:    [[V_IMAGP:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[V]], i32 0, i32 1
+// CHECK-NEXT:    store i64 2, i64* [[V_REALP]], align 8
+// CHECK-NEXT:    store i64 0, i64* [[V_IMAGP]], align 8
+// CHECK-NEXT:    ret void
+//
+void t3() {
+  __complex__ long long v = 2;
+}
+
+// PR3131
+float _Complex t4();
+
+// CHECK-LABEL: @t5(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X:%.*]] = alloca { float, float }, align 4
+// CHECK-NEXT:    [[COERCE:%.*]] = alloca { float, float }, align 4
+// CHECK-NEXT:    [[CALL:%.*]] = call <2 x float> (...) @t4() [[ATTR5]]
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast { float, float }* [[COERCE]] to <2 x float>*
+// CHECK-NEXT:    store <2 x float> [[CALL]], <2 x float>* [[TMP0]], align 4
+// CHECK-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[COERCE]], i32 0, i32 0
+// CHECK-NEXT:    [[COERCE_REAL:%.*]] = load float, float* [[COERCE_REALP]], align 4
+// CHECK-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[COERCE]], i32 0, i32 1
+// CHECK-NEXT:    [[COERCE_IMAG:%.*]] = load float, float* [[COERCE_IMAGP]], align 4
+// CHECK-NEXT:    [[X_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[X]], i32 0, i32 0
+// CHECK-NEXT:    [[X_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[X]], i32 0, i32 1
+// CHECK-NEXT:    store float [[COERCE_REAL]], float* [[X_REALP]], align 4
+// CHECK-NEXT:    store float [[COERCE_IMAG]], float* [[X_IMAGP]], align 4
+// CHECK-NEXT:    ret void
+//
+void t5() {
+  float _Complex x = t4();
+}
+
+// CHECK-LABEL: @t6(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[G1_REAL:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[INC:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_REAL]], double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[INC]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[G1_IMAG]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G1_REAL1:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG2:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[DEC:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_REAL1]], double -1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[DEC]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[G1_IMAG2]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G1_REAL3:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG4:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[INC5:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_REAL3]], double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[INC5]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[G1_IMAG4]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[G1_REAL6:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    [[G1_IMAG7:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[DEC8:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_REAL6]], double -1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    store double [[DEC8]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
+// CHECK-NEXT:    store double [[G1_IMAG7]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
+// CHECK-NEXT:    [[CI1_REAL:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[INC9:%.*]] = add i32 [[CI1_REAL]], 1
+// CHECK-NEXT:    store i32 [[INC9]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[CI1_IMAG]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI1_REAL10:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG11:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[DEC12:%.*]] = add i32 [[CI1_REAL10]], -1
+// CHECK-NEXT:    store i32 [[DEC12]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[CI1_IMAG11]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI1_REAL13:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG14:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[INC15:%.*]] = add i32 [[CI1_REAL13]], 1
+// CHECK-NEXT:    store i32 [[INC15]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[CI1_IMAG14]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[CI1_REAL16:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    [[CI1_IMAG17:%.*]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    [[DEC18:%.*]] = add i32 [[CI1_REAL16]], -1
+// CHECK-NEXT:    store i32 [[DEC18]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 0), align 4
+// CHECK-NEXT:    store i32 [[CI1_IMAG17]], i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @ci1, i32 0, i32 1), align 4
+// CHECK-NEXT:    ret void
+//
+void t6() {
+  g1++;
+  g1--;
+  ++g1;
+  --g1;
+  ci1++;
+  ci1--;
+  ++ci1;
+  --ci1;
+}
+
+// <rdar://problem/7958272>
+// CHECK-LABEL: @t7(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[C:%.*]] = alloca { double, double }, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[C]], i32 0, i32 0
+// CHECK-NEXT:    store double [[C_COERCE0:%.*]], double* [[TMP0]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[C]], i32 0, i32 1
+// CHECK-NEXT:    store double [[C_COERCE1:%.*]], double* [[TMP1]], align 8
+// CHECK-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[C]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[C_REALP]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = call double @llvm.fabs.f64(double [[TMP2]]) [[ATTR5]]
+// CHECK-NEXT:    ret double [[TMP3]]
+//
+double t7(double _Complex c) {
+  return __builtin_fabs(__real__(c));
+}
+
+// CHECK-LABEL: @t8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X:%.*]] = alloca { i32, i32 }*, align 8
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL:%.*]] = alloca { i32, i32 }, align 4
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_REALP:%.*]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_IMAGP:%.*]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[DOTCOMPOUNDLITERAL]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[DOTCOMPOUNDLITERAL_REALP]], align 4
+// CHECK-NEXT:    store i32 0, i32* [[DOTCOMPOUNDLITERAL_IMAGP]], align 4
+// CHECK-NEXT:    store { i32, i32 }* [[DOTCOMPOUNDLITERAL]], { i32, i32 }** [[X]], align 8
+// CHECK-NEXT:    ret void
+//
+void t8() {
+  __complex__ int *x = &(__complex__ int){1};
+}
+
+const _Complex double test9const = 0;
+// CHECK-LABEL: @test9func(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
+// CHECK-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 0
+// CHECK-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 1
+// CHECK-NEXT:    store double 0.000000e+00, double* [[RETVAL_REALP]], align 8
+// CHECK-NEXT:    store double 0.000000e+00, double* [[RETVAL_IMAGP]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load { double, double }, { double, double }* [[RETVAL]], align 8
+// CHECK-NEXT:    ret { double, double } [[TMP0]]
+//
+_Complex double test9func() { return test9const; }
+
+// D6217
+// CHECK-LABEL: @t91(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[C:%.*]] = alloca [0 x i8], align 1
+// CHECK-NEXT:    br i1 false, label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK:       cond.true:
+// CHECK-NEXT:    [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br label [[COND_END:%.*]]
+// CHECK:       cond.false:
+// CHECK-NEXT:    [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br label [[COND_END]]
+// CHECK:       cond.end:
+// CHECK-NEXT:    [[COND_R:%.*]] = phi double [ [[CONV]], [[COND_TRUE]] ], [ [[CONV1]], [[COND_FALSE]] ]
+// CHECK-NEXT:    [[COND_I:%.*]] = phi double [ 0.000000e+00, [[COND_TRUE]] ], [ 0.000000e+00, [[COND_FALSE]] ]
+// CHECK-NEXT:    ret void
+//
+void t91() {
+  // Check for proper type promotion of conditional expression
+  char c[(int)(sizeof(typeof((0 ? 2.0f : (_Complex double) 2.0f))) - sizeof(_Complex double))];
+  // Check for proper codegen
+  (0 ? 2.0f : (_Complex double) 2.0f);
+}
+
+// CHECK-LABEL: @t92(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[C:%.*]] = alloca [0 x i8], align 1
+// CHECK-NEXT:    br i1 false, label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK:       cond.true:
+// CHECK-NEXT:    [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br label [[COND_END:%.*]]
+// CHECK:       cond.false:
+// CHECK-NEXT:    [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.maytrap") [[ATTR5]]
+// CHECK-NEXT:    br label [[COND_END]]
+// CHECK:       cond.end:
+// CHECK-NEXT:    [[COND_R:%.*]] = phi double [ [[CONV]], [[COND_TRUE]] ], [ [[CONV1]], [[COND_FALSE]] ]
+// CHECK-NEXT:    [[COND_I:%.*]] = phi double [ 0.000000e+00, [[COND_TRUE]] ], [ 0.000000e+00, [[COND_FALSE]] ]
+// CHECK-NEXT:    ret void
+//
+void t92() {
+  // Check for proper type promotion of conditional expression
+  char c[(int)(sizeof(typeof((0 ? (_Complex double) 2.0f : 2.0f))) - sizeof(_Complex double))];
+  // Check for proper codegen
+  (0 ? (_Complex double) 2.0f : 2.0f);
+}
+
Index: clang/test/CodeGen/constrained-math-builtins.c
===================================================================
--- clang/test/CodeGen/constrained-math-builtins.c
+++ clang/test/CodeGen/constrained-math-builtins.c
@@ -1,6 +1,11 @@
-// RUN: %clang_cc1 -triple x86_64-linux -ffp-exception-behavior=strict -w -S -o - -emit-llvm %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-linux -ffp-exception-behavior=maytrap -w -S -o - -emit-llvm %s | FileCheck %s
 
 // Test codegen of constrained math builtins.
+//
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+
+#pragma float_control(except, on)
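+
+// A minimal illustrative sketch (not FileCheck-verified; fadd_sketch is a
+// placeholder name): with the pragma above in effect, even a plain double
+// addition is expected to lower to @llvm.experimental.constrained.fadd.f64
+// carrying "fpexcept.strict" rather than the "maytrap" requested on the RUN
+// line.
+double fadd_sketch(double a, double b) {
+  return a + b;
+}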
 
 void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
   f = __builtin_fmod(f,f);    f = __builtin_fmodf(f,f);   f =  __builtin_fmodl(f,f);
@@ -154,9 +159,9 @@
   (double)f * f - f;
   (long double)-f * f + f;
 
-  // CHECK: call float @llvm.experimental.constrained.fmuladd.f32
+  // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
   // CHECK: fneg
-  // CHECK: call double @llvm.experimental.constrained.fmuladd.f64
+  // CHECK: call double @llvm.experimental.constrained.fmuladd.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
   // CHECK: fneg
-  // CHECK: call x86_fp80 @llvm.experimental.constrained.fmuladd.f80
+  // CHECK: call x86_fp80 @llvm.experimental.constrained.fmuladd.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
 };
Index: clang/test/CodeGen/exprs-strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/exprs-strictfp.c
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -ffp-exception-behavior=maytrap -emit-llvm -o - | FileCheck %s
+
+// Test codegen of constrained floating point to bool conversion
+//
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+
+#pragma float_control(except, on)
+
+void eMaisUma() {
+  double t[1];
+  if (*t)
+    return;
+// CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double 0.000000e+00, metadata !"une", metadata !"fpexcept.strict")
+}
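+
+// A minimal illustrative sketch (not FileCheck-verified; toBoolSketch is a
+// placeholder name): under the same pragma, a plain floating-point-to-bool
+// conversion is likewise expected to go through a constrained fcmp against
+// zero with "fpexcept.strict" instead of the command-line default.
+_Bool toBoolSketch(double d) {
+  return d;
+}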
+
Index: clang/test/CodeGen/fp16-ops-strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/fp16-ops-strictfp.c
@@ -0,0 +1,741 @@
+// REQUIRES: arm-registered-target
+// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple arm-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple aarch64-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple x86_64-linux-gnu %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple arm-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple aarch64-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple arm-none-linux-gnueabi -fnative-half-type %s \
+// RUN:   | FileCheck %s --check-prefix=NATIVE-HALF --check-prefix=CHECK
+// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple aarch64-none-linux-gnueabi -fnative-half-type %s \
+// RUN:   | FileCheck %s --check-prefix=NATIVE-HALF --check-prefix=CHECK
+//
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
+
+#pragma float_control(except, on)
+
+typedef unsigned cond_t;
+typedef __fp16 float16_t;
+
+volatile cond_t test;
+volatile int i0;
+volatile __fp16 h0 = 0.0, h1 = 1.0, h2;
+volatile float f0, f1, f2;
+volatile double d0;
+short s0;
+
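+// A minimal illustrative sketch (not FileCheck-verified; add_sketch is a
+// placeholder name): with the pragma above in effect, a plain __fp16
+// addition on the non-native-half targets is expected to promote to float,
+// perform a constrained fadd, and truncate back, with the fpext/fadd
+// carrying "fpexcept.strict" (the trailing fptrunc currently picks up the
+// command-line "maytrap"; see the FIXME above).
+void add_sketch(void) {
+  h2 = h0 + h1;
+}
+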
+void foo(void) {
+  // CHECK-LABEL: define void @foo()
+
+  // Check unary ops
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 = (test);
+
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH0000, metadata !"une", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float 0.000000e+00, metadata !"une", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (!h1);
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: fneg float
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: fneg half
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = -h1;
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: load volatile half
+  // NATIVE-HALF-NEXT: store volatile half
+  // NOTNATIVE: store {{.*}} half {{.*}}, half*
+  h1 = +h1;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xH3C00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1++;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xH3C00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  ++h1;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xHBC00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  --h1;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xHBC00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1--;
+
+  // Check binary ops with various operands
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = h0 * h2;
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = h0 * (__fp16) -2.0f;
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = h0 * f2;
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = f0 * h2;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = h0 * i0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h0 / h2);
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h0 / (__fp16) -2.0f);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h0 / f2);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (f0 / h2);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h0 / i0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h2 + h0);
+
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f64(double -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = ((__fp16)-2.0 + h0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h2 + f0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (f2 + h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h0 + i0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h2 - h0);
+
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = ((__fp16)-2.0f - h0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h2 - f0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (f2 - h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h0 - i0);
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h2 < h0);
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h2 < (__fp16)42.0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h2 < f0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (f2 < h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (i0 < h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 < i0);
+
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 > h2);
+
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = ((__fp16)42.0 > h2);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 > f2);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (f0 > h2);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (i0 > h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 > i0);
+
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h2 <= h0);
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h2 <= (__fp16)42.0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h2 <= f0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (f2 <= h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (i0 <= h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 <= i0);
+
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 >= h2);
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f64(double -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 >= (__fp16)-2.0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 >= f2);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (f0 >= h2);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (i0 >= h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 >= i0);
+
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h1 == h2);
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h1 == (__fp16)1.0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h1 == f1);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (f1 == h1);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (i0 == h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 == i0);
+
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h1 != h2);
+
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h1 != (__fp16)1.0);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h1 != f1);
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (f1 != h1);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (i0 != h0);
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  test = (h0 != i0);
+
+  // NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH0000, metadata !"une", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float {{.*}}, metadata !"une", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h1 = (h1 ? h2 : h0);
+
+  // Check assignments (inc. compound)
+  // CHECK: store {{.*}} half {{.*}}, half*
+  // xATIVE-HALF: store {{.*}} half 0xHC000 // FIXME: We should be folding here.
+  h0 = h1;
+
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 = (__fp16)-2.0f;
+
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 = f0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 = i0;
+
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  i0 = h0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 += h1;
+
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f32(float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 += (__fp16)1.0f;
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 += f2;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  i0 += h0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 += i0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 -= h1;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 -= (__fp16)1.0;
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 -= f2;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  i0 -= h0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 -= i0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 *= h1;
+
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 *= (__fp16)1.0;
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 *= f2;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  i0 *= h0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 *= i0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 /= h1;
+
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 /= (__fp16)1.0;
+
+  // CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 /= f2;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} i32 {{.*}}, i32*
+  i0 /= h0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
+  // NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 /= i0;
+
+  // Check conversions to/from double
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 = d0;
+
+  // CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 = (float)d0;
+
+  // CHECK: call double @llvm.experimental.constrained.fpext.f64.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} double {{.*}}, double*
+  d0 = h0;
+
+  // CHECK: [[MID:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float [[MID]], metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} double {{.*}}, double*
+  d0 = (float)h0;
+
+  // NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  // CHECK: store {{.*}} half {{.*}}, half*
+  h0 = s0;
+}
+
+// CHECK-LABEL: define void @testTypeDef(
+// NATIVE-HALF: call <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// NOTNATIVE: %[[CONV:.*]] = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
+// NOTNATIVE: %[[CONV1:.*]] = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
+// NOTNATIVE: %[[ADD:.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %[[CONV]], <4 x float> %[[CONV1]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+// NOTNATIVE: call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float> %[[ADD]], metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+void testTypeDef() {
+  __fp16 t0 __attribute__((vector_size(8)));
+  float16_t t1 __attribute__((vector_size(8)));
+  t1 = t0 + t1;
+}
+
Index: clang/test/CodeGen/incdec-strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/incdec-strictfp.c
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -ffp-exception-behavior=maytrap -emit-llvm -o - %s | FileCheck %s
+//
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
+
+#pragma float_control(except, on)
+
+int printf(const char * restrict format, ...);
+
+// CHECK-LABEL: @incdec_test
+void incdec_test(double A, double B, double C, double D) {
+  A++;
+// CHECK: %inc = call double @llvm.experimental.constrained.fadd.f64(double %{{.*}}, double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  B--;
+// CHECK: %dec = call double @llvm.experimental.constrained.fadd.f64(double %{{.*}}, double -1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ++C;
+// CHECK: call double @llvm.experimental.constrained.fadd.f64(double %{{.*}}, double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  --D;
+// CHECK: call double @llvm.experimental.constrained.fadd.f64(double %{{.*}}, double -1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+
+  printf("%lf %lf\n", A, B);
+}
+
Index: clang/test/CodeGen/ubsan-conditional-strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/ubsan-conditional-strictfp.c
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 %s -triple x86_64-unknown-unknown -ffp-exception-behavior=maytrap -emit-llvm -fsanitize=float-divide-by-zero -o - | FileCheck %s
+
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
+
+#pragma float_control(except, on)
+
+_Bool b;
+// CHECK: @f(
+double f(double x) {
+  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double 0.000000e+00, metadata !"une", metadata !"fpexcept.maytrap")
+  return b ? 0.0 / x : 0.0;
+}
Index: clang/test/CodeGen/zvector-strictfp.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/zvector-strictfp.c
@@ -0,0 +1,211 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector -ffp-exception-behavior=maytrap -emit-llvm -o - -W -Wall -Werror %s | opt -S -mem2reg | FileCheck %s
+
+// Test that the constrained intrinsics are picking up the exception
+// metadata from the AST instead of the global default from the command line.
+// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
+
+#pragma float_control(except, on)
+
+volatile vector signed char sc, sc2;
+volatile vector unsigned char uc, uc2;
+volatile vector bool char bc, bc2;
+
+volatile vector signed short ss, ss2;
+volatile vector unsigned short us, us2;
+volatile vector bool short bs, bs2;
+
+volatile vector signed int si, si2;
+volatile vector unsigned int ui, ui2;
+volatile vector bool int bi, bi2;
+
+volatile vector signed long long sl, sl2;
+volatile vector unsigned long long ul, ul2;
+volatile vector bool long long bl, bl2;
+
+volatile vector double fd, fd2;
+
+volatile int cnt;
+
+// CHECK-LABEL: define void @test_preinc() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[INC8:%.*]] = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> [[TMP8]], <2 x double> <double 1.000000e+00, double 1.000000e+00>, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+// CHECK:   store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_preinc(void) {
+
+  ++sc2;
+  ++uc2;
+
+  ++ss2;
+  ++us2;
+
+  ++si2;
+  ++ui2;
+
+  ++sl2;
+  ++ul2;
+
+  ++fd2;
+}
+
+// CHECK-LABEL: define void @test_postinc() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[INC8:%.*]] = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> [[TMP8]], <2 x double> <double 1.000000e+00, double 1.000000e+00>, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+// CHECK:   store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_postinc(void) {
+
+  sc2++;
+  uc2++;
+
+  ss2++;
+  us2++;
+
+  si2++;
+  ui2++;
+
+  sl2++;
+  ul2++;
+
+  fd2++;
+}
+
+// CHECK-LABEL: define void @test_predec() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[DEC8:%.*]] = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> [[TMP8]], <2 x double> <double -1.000000e+00, double -1.000000e+00>, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+// CHECK:   store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_predec(void) {
+
+  --sc2;
+  --uc2;
+
+  --ss2;
+  --us2;
+
+  --si2;
+  --ui2;
+
+  --sl2;
+  --ul2;
+
+  --fd2;
+}
+
+// CHECK-LABEL: define void @test_postdec() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[DEC8:%.*]] = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> [[TMP8]], <2 x double> <double -1.000000e+00, double -1.000000e+00>, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+// CHECK:   store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_postdec(void) {
+
+  sc2--;
+  uc2--;
+
+  ss2--;
+  us2--;
+
+  si2--;
+  ui2--;
+
+  sl2--;
+  ul2--;
+
+  fd2--;
+}
Index: llvm/include/llvm/IR/IRBuilder.h
===================================================================
--- llvm/include/llvm/IR/IRBuilder.h
+++ llvm/include/llvm/IR/IRBuilder.h
@@ -266,11 +266,19 @@
 
   /// Set the exception handling to be used with constrained floating point
   void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
+#ifndef NDEBUG
+    Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
+    assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
+#endif
     DefaultConstrainedExcept = NewExcept;
   }
 
   /// Set the rounding mode handling to be used with constrained floating point
   void setDefaultConstrainedRounding(RoundingMode NewRounding) {
+#ifndef NDEBUG
+    Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
+    assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
+#endif
     DefaultConstrainedRounding = NewRounding;
   }