diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h --- a/clang/include/clang/AST/StmtOpenMP.h +++ b/clang/include/clang/AST/StmtOpenMP.h @@ -2863,6 +2863,8 @@ POS_V, POS_E, POS_UpdateExpr, + POS_D, + POS_Cond, }; /// Set 'x' part of the associated expression/statement. @@ -2877,6 +2879,10 @@ void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; } + /// Set 'd' part of the associated expression/statement. + void setD(Expr *D) { Data->getChildren()[DataPositionTy::POS_D] = D; } + /// Set conditional expression in `atomic compare`. + void setCond(Expr *C) { Data->getChildren()[DataPositionTy::POS_Cond] = C; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' @@ -2894,6 +2900,8 @@ /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. + /// \param D 'd' part of the associated expression/statement. + /// \param Cond Conditional expression in `atomic compare` construct. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in @@ -2901,7 +2909,8 @@ static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, - Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); + Expr *E, Expr *UE, Expr *D, Expr *Cond, bool IsXLHSInRHSPart, + bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. @@ -2951,6 +2960,20 @@ const Expr *getExpr() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]); } + /// Get 'd' part of the associated expression/statement. 
+ Expr *getD() { + return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]); + } + Expr *getD() const { + return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]); + } + /// Get the 'cond' part of the source atomic expression. + Expr *getCondExpr() { + return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]); + } + Expr *getCondExpr() const { + return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]); + } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp --- a/clang/lib/AST/StmtOpenMP.cpp +++ b/clang/lib/AST/StmtOpenMP.cpp @@ -863,16 +863,20 @@ !IsStandalone); } -OMPAtomicDirective *OMPAtomicDirective::Create( - const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, - ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, - Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) { +OMPAtomicDirective * +OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, + Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, + Expr *UE, Expr *D, Expr *Cond, bool IsXLHSInRHSPart, + bool IsPostfixUpdate) { auto *Dir = createDirective<OMPAtomicDirective>( - C, Clauses, AssociatedStmt, /*NumChildren=*/4, StartLoc, EndLoc); + C, Clauses, AssociatedStmt, /*NumChildren=*/6, StartLoc, EndLoc); Dir->setX(X); Dir->setV(V); Dir->setExpr(E); Dir->setUpdateExpr(UE); + Dir->setD(D); + Dir->setCond(Cond); Dir->IsXLHSInRHSPart = IsXLHSInRHSPart; Dir->IsPostfixUpdate = IsPostfixUpdate; return Dir; @@ -882,7 +886,7 @@ unsigned NumClauses, EmptyShell) { return createEmptyDirective<OMPAtomicDirective>( - C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/4); + C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/6); } OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C, diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp --- 
a/clang/lib/CodeGen/CGStmtOpenMP.cpp +++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp @@ -6017,11 +6017,50 @@ } } +static void emitOMPAtomicCompareExpr(CodeGenFunction &CGF, + llvm::AtomicOrdering AO, const Expr *X, + const Expr *E, const Expr *D, + const Expr *CE, bool IsXBinopExpr, + SourceLocation Loc) { + llvm::OpenMPIRBuilder &OMPBuilder = + CGF.CGM.getOpenMPRuntime().getOMPBuilder(); + + OMPAtomicCompareOp Op; + assert(isa<BinaryOperator>(CE) && "CE is not a BinaryOperator"); + switch (cast<BinaryOperator>(CE)->getOpcode()) { + case BO_EQ: + Op = OMPAtomicCompareOp::EQ; + break; + case BO_LT: + Op = OMPAtomicCompareOp::MIN; + break; + case BO_GT: + Op = OMPAtomicCompareOp::MAX; + break; + default: + llvm_unreachable("unsupported atomic compare binary operator"); + } + + LValue XLVal = CGF.EmitLValue(X); + llvm::Value *XPtr = XLVal.getPointer(CGF); + llvm::Value *EVal = CGF.EmitScalarExpr(E); + llvm::Value *DVal = D ? CGF.EmitScalarExpr(D) : nullptr; + + llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{ + XPtr, XPtr->getType()->getPointerElementType(), + X->getType().isVolatileQualified(), + X->getType()->hasSignedIntegerRepresentation()}; + + CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare( + CGF.Builder, XOpVal, EVal, DVal, AO, Op, IsXBinopExpr)); +} + static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, llvm::AtomicOrdering AO, bool IsPostfixUpdate, const Expr *X, const Expr *V, const Expr *E, - const Expr *UE, bool IsXLHSInRHSPart, - bool IsCompareCapture, SourceLocation Loc) { + const Expr *UE, const Expr *D, const Expr *CE, + bool IsXLHSInRHSPart, bool IsCompareCapture, + SourceLocation Loc) { switch (Kind) { case OMPC_read: emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); @@ -6045,11 +6084,7 @@ "'atomic compare capture' is not supported for now"); CGF.CGM.getDiags().Report(DiagID); } else { - // Emit an error here. 
- unsigned DiagID = CGF.CGM.getDiags().getCustomDiagID( - DiagnosticsEngine::Error, - "'atomic compare' is not supported for now"); - CGF.CGM.getDiags().Report(DiagID); + emitOMPAtomicCompareExpr(CGF, AO, X, E, D, CE, IsXLHSInRHSPart, Loc); } break; } @@ -6202,8 +6237,8 @@ LexicalScope Scope(*this, S.getSourceRange()); EmitStopPoint(S.getAssociatedStmt()); emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(), - S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(), - IsCompareCapture, S.getBeginLoc()); + S.getExpr(), S.getUpdateExpr(), S.getD(), S.getCondExpr(), + S.isXLHSInRHSPart(), IsCompareCapture, S.getBeginLoc()); } static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -11133,11 +11133,11 @@ switch (Cond->getOpcode()) { case BO_EQ: { C = Cond; - D = BO->getRHS(); + D = BO->getRHS()->IgnoreImpCasts(); if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) { - E = Cond->getRHS(); + E = Cond->getRHS()->IgnoreImpCasts(); } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) { - E = Cond->getLHS(); + E = Cond->getLHS()->IgnoreImpCasts(); } else { ErrorInfo.Error = ErrorTy::InvalidComparison; ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc(); @@ -11148,7 +11148,7 @@ } case BO_LT: case BO_GT: { - E = BO->getRHS(); + E = BO->getRHS()->IgnoreImpCasts(); if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS()) && checkIfTwoExprsAreSame(ContextRef, E, Cond->getRHS())) { C = Cond; @@ -11228,11 +11228,11 @@ switch (Cond->getOpcode()) { case BO_EQ: { C = Cond; - D = CO->getTrueExpr(); + D = CO->getTrueExpr()->IgnoreImpCasts(); if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) { - E = Cond->getRHS(); + E = Cond->getRHS()->IgnoreImpCasts(); } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) { - E = Cond->getLHS(); + E = 
Cond->getLHS()->IgnoreImpCasts(); } else { ErrorInfo.Error = ErrorTy::InvalidComparison; ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc(); @@ -11243,7 +11243,7 @@ } case BO_LT: case BO_GT: { - E = CO->getTrueExpr(); + E = CO->getTrueExpr()->IgnoreImpCasts(); if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS()) && checkIfTwoExprsAreSame(ContextRef, E, Cond->getRHS())) { C = Cond; @@ -11843,6 +11843,8 @@ Expr *V = nullptr; Expr *E = nullptr; Expr *UE = nullptr; + Expr *D = nullptr; + Expr *CE = nullptr; bool IsXLHSInRHSPart = false; bool IsPostfixUpdate = false; // OpenMP [2.12.6, atomic Construct] @@ -12252,15 +12254,19 @@ << ErrorInfo.Error << ErrorInfo.NoteRange; return StmtError(); } - // TODO: We don't set X, D, E, etc. here because in code gen we will emit - // error directly. + X = Checker.getX(); + E = Checker.getE(); + D = Checker.getD(); + CE = Checker.getCond(); + // We reuse IsXLHSInRHSPart to tell if it is in the form 'x ordop expr'. + IsXLHSInRHSPart = Checker.isXBinopExpr(); } } setFunctionHasBranchProtectedScope(); return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt, - X, V, E, UE, IsXLHSInRHSPart, + X, V, E, UE, D, CE, IsXLHSInRHSPart, IsPostfixUpdate); } diff --git a/clang/test/OpenMP/atomic_compare_codegen.cpp b/clang/test/OpenMP/atomic_compare_codegen.cpp new file mode 100644 --- /dev/null +++ b/clang/test/OpenMP/atomic_compare_codegen.cpp @@ -0,0 +1,4022 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ +// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -fopenmp-version=51 -x c -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s +// RUN: %clang_cc1 
-fopenmp -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s + +// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -fopenmp-version=51 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=51 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s +// SIMD-ONLY0-NOT: {{__kmpc|__tgt}} +// expected-no-diagnostics + +#ifndef HEADER +#define HEADER + +void foo() { + char cx, ce, cd; + unsigned char ucx, uce, ucd; + short sx, se, sd; + unsigned short usx, use, usd; + int ix, ie, id; + unsigned int uix, uie, uid; + long lx, le, ld; + unsigned long ulx, ule, uld; + long long llx, lle, lld; + unsigned long long ullx, ulle, ulld; + +#pragma omp atomic compare + cx = cx > ce ? ce : cx; +#pragma omp atomic compare + cx = cx < ce ? ce : cx; +#pragma omp atomic compare + cx = ce > cx ? ce : cx; +#pragma omp atomic compare + cx = ce < cx ? ce : cx; +#pragma omp atomic compare + if (cx > ce) + cx = ce; +#pragma omp atomic compare + if (cx < ce) + cx = ce; +#pragma omp atomic compare + if (ce > cx) + cx = ce; +#pragma omp atomic compare + if (ce < cx) + cx = ce; + +#pragma omp atomic compare + cx = cx == ce ? cd : cx; +#pragma omp atomic compare + cx = ce == cx ? cd : cx; +#pragma omp atomic compare + if (cx == ce) + cx = cd; +#pragma omp atomic compare + if (ce == cx) + cx = cd; + +#pragma omp atomic compare + ucx = ucx > uce ? uce : ucx; +#pragma omp atomic compare + ucx = ucx < uce ? uce : ucx; +#pragma omp atomic compare + ucx = uce > ucx ? uce : ucx; +#pragma omp atomic compare + ucx = uce < ucx ? 
uce : ucx; +#pragma omp atomic compare + if (ucx > uce) + ucx = uce; +#pragma omp atomic compare + if (ucx < uce) + ucx = uce; +#pragma omp atomic compare + if (uce > ucx) + ucx = uce; +#pragma omp atomic compare + if (uce < ucx) + ucx = uce; + +#pragma omp atomic compare + ucx = ucx == uce ? ucd : ucx; +#pragma omp atomic compare + ucx = uce == ucx ? ucd : ucx; +#pragma omp atomic compare + if (ucx == uce) + ucx = ucd; +#pragma omp atomic compare + if (uce == ucx) + ucx = ucd; + +#pragma omp atomic compare acq_rel + cx = cx > ce ? ce : cx; +#pragma omp atomic compare acq_rel + cx = cx < ce ? ce : cx; +#pragma omp atomic compare acq_rel + cx = ce > cx ? ce : cx; +#pragma omp atomic compare acq_rel + cx = ce < cx ? ce : cx; +#pragma omp atomic compare acq_rel + if (cx > ce) + cx = ce; +#pragma omp atomic compare acq_rel + if (cx < ce) + cx = ce; +#pragma omp atomic compare acq_rel + if (ce > cx) + cx = ce; +#pragma omp atomic compare acq_rel + if (ce < cx) + cx = ce; + +#pragma omp atomic compare acq_rel + cx = cx == ce ? cd : cx; +#pragma omp atomic compare acq_rel + cx = ce == cx ? cd : cx; +#pragma omp atomic compare acq_rel + if (cx == ce) + cx = cd; +#pragma omp atomic compare acq_rel + if (ce == cx) + cx = cd; + +#pragma omp atomic compare acq_rel + ucx = ucx > uce ? uce : ucx; +#pragma omp atomic compare acq_rel + ucx = ucx < uce ? uce : ucx; +#pragma omp atomic compare acq_rel + ucx = uce > ucx ? uce : ucx; +#pragma omp atomic compare acq_rel + ucx = uce < ucx ? uce : ucx; +#pragma omp atomic compare acq_rel + if (ucx > uce) + ucx = uce; +#pragma omp atomic compare acq_rel + if (ucx < uce) + ucx = uce; +#pragma omp atomic compare acq_rel + if (uce > ucx) + ucx = uce; +#pragma omp atomic compare acq_rel + if (uce < ucx) + ucx = uce; + +#pragma omp atomic compare acq_rel + ucx = ucx == uce ? ucd : ucx; +#pragma omp atomic compare acq_rel + ucx = uce == ucx ? 
ucd : ucx; +#pragma omp atomic compare acq_rel + if (ucx == uce) + ucx = ucd; +#pragma omp atomic compare acq_rel + if (uce == ucx) + ucx = ucd; + +#pragma omp atomic compare acquire + cx = cx > ce ? ce : cx; +#pragma omp atomic compare acquire + cx = cx < ce ? ce : cx; +#pragma omp atomic compare acquire + cx = ce > cx ? ce : cx; +#pragma omp atomic compare acquire + cx = ce < cx ? ce : cx; +#pragma omp atomic compare acquire + if (cx > ce) + cx = ce; +#pragma omp atomic compare acquire + if (cx < ce) + cx = ce; +#pragma omp atomic compare acquire + if (ce > cx) + cx = ce; +#pragma omp atomic compare acquire + if (ce < cx) + cx = ce; + +#pragma omp atomic compare acquire + cx = cx == ce ? cd : cx; +#pragma omp atomic compare acquire + cx = ce == cx ? cd : cx; +#pragma omp atomic compare acquire + if (cx == ce) + cx = cd; +#pragma omp atomic compare acquire + if (ce == cx) + cx = cd; + +#pragma omp atomic compare acquire + ucx = ucx > uce ? uce : ucx; +#pragma omp atomic compare acquire + ucx = ucx < uce ? uce : ucx; +#pragma omp atomic compare acquire + ucx = uce > ucx ? uce : ucx; +#pragma omp atomic compare acquire + ucx = uce < ucx ? uce : ucx; +#pragma omp atomic compare acquire + if (ucx > uce) + ucx = uce; +#pragma omp atomic compare acquire + if (ucx < uce) + ucx = uce; +#pragma omp atomic compare acquire + if (uce > ucx) + ucx = uce; +#pragma omp atomic compare acquire + if (uce < ucx) + ucx = uce; + +#pragma omp atomic compare acquire + ucx = ucx == uce ? ucd : ucx; +#pragma omp atomic compare acquire + ucx = uce == ucx ? ucd : ucx; +#pragma omp atomic compare acquire + if (ucx == uce) + ucx = ucd; +#pragma omp atomic compare acquire + if (uce == ucx) + ucx = ucd; + +#pragma omp atomic compare relaxed + cx = cx > ce ? ce : cx; +#pragma omp atomic compare relaxed + cx = cx < ce ? ce : cx; +#pragma omp atomic compare relaxed + cx = ce > cx ? ce : cx; +#pragma omp atomic compare relaxed + cx = ce < cx ? 
ce : cx; +#pragma omp atomic compare relaxed + if (cx > ce) + cx = ce; +#pragma omp atomic compare relaxed + if (cx < ce) + cx = ce; +#pragma omp atomic compare relaxed + if (ce > cx) + cx = ce; +#pragma omp atomic compare relaxed + if (ce < cx) + cx = ce; + +#pragma omp atomic compare relaxed + cx = cx == ce ? cd : cx; +#pragma omp atomic compare relaxed + cx = ce == cx ? cd : cx; +#pragma omp atomic compare relaxed + if (cx == ce) + cx = cd; +#pragma omp atomic compare relaxed + if (ce == cx) + cx = cd; + +#pragma omp atomic compare relaxed + ucx = ucx > uce ? uce : ucx; +#pragma omp atomic compare relaxed + ucx = ucx < uce ? uce : ucx; +#pragma omp atomic compare relaxed + ucx = uce > ucx ? uce : ucx; +#pragma omp atomic compare relaxed + ucx = uce < ucx ? uce : ucx; +#pragma omp atomic compare relaxed + if (ucx > uce) + ucx = uce; +#pragma omp atomic compare relaxed + if (ucx < uce) + ucx = uce; +#pragma omp atomic compare relaxed + if (uce > ucx) + ucx = uce; +#pragma omp atomic compare relaxed + if (uce < ucx) + ucx = uce; + +#pragma omp atomic compare relaxed + ucx = ucx == uce ? ucd : ucx; +#pragma omp atomic compare relaxed + ucx = uce == ucx ? ucd : ucx; +#pragma omp atomic compare relaxed + if (ucx == uce) + ucx = ucd; +#pragma omp atomic compare relaxed + if (uce == ucx) + ucx = ucd; + +#pragma omp atomic compare release + cx = cx > ce ? ce : cx; +#pragma omp atomic compare release + cx = cx < ce ? ce : cx; +#pragma omp atomic compare release + cx = ce > cx ? ce : cx; +#pragma omp atomic compare release + cx = ce < cx ? ce : cx; +#pragma omp atomic compare release + if (cx > ce) + cx = ce; +#pragma omp atomic compare release + if (cx < ce) + cx = ce; +#pragma omp atomic compare release + if (ce > cx) + cx = ce; +#pragma omp atomic compare release + if (ce < cx) + cx = ce; + +#pragma omp atomic compare release + cx = cx == ce ? cd : cx; +#pragma omp atomic compare release + cx = ce == cx ? 
cd : cx; +#pragma omp atomic compare release + if (cx == ce) + cx = cd; +#pragma omp atomic compare release + if (ce == cx) + cx = cd; + +#pragma omp atomic compare release + ucx = ucx > uce ? uce : ucx; +#pragma omp atomic compare release + ucx = ucx < uce ? uce : ucx; +#pragma omp atomic compare release + ucx = uce > ucx ? uce : ucx; +#pragma omp atomic compare release + ucx = uce < ucx ? uce : ucx; +#pragma omp atomic compare release + if (ucx > uce) + ucx = uce; +#pragma omp atomic compare release + if (ucx < uce) + ucx = uce; +#pragma omp atomic compare release + if (uce > ucx) + ucx = uce; +#pragma omp atomic compare release + if (uce < ucx) + ucx = uce; + +#pragma omp atomic compare release + ucx = ucx == uce ? ucd : ucx; +#pragma omp atomic compare release + ucx = uce == ucx ? ucd : ucx; +#pragma omp atomic compare release + if (ucx == uce) + ucx = ucd; +#pragma omp atomic compare release + if (uce == ucx) + ucx = ucd; + +#pragma omp atomic compare seq_cst + cx = cx > ce ? ce : cx; +#pragma omp atomic compare seq_cst + cx = cx < ce ? ce : cx; +#pragma omp atomic compare seq_cst + cx = ce > cx ? ce : cx; +#pragma omp atomic compare seq_cst + cx = ce < cx ? ce : cx; +#pragma omp atomic compare seq_cst + if (cx > ce) + cx = ce; +#pragma omp atomic compare seq_cst + if (cx < ce) + cx = ce; +#pragma omp atomic compare seq_cst + if (ce > cx) + cx = ce; +#pragma omp atomic compare seq_cst + if (ce < cx) + cx = ce; + +#pragma omp atomic compare seq_cst + cx = cx == ce ? cd : cx; +#pragma omp atomic compare seq_cst + cx = ce == cx ? cd : cx; +#pragma omp atomic compare seq_cst + if (cx == ce) + cx = cd; +#pragma omp atomic compare seq_cst + if (ce == cx) + cx = cd; + +#pragma omp atomic compare seq_cst + ucx = ucx > uce ? uce : ucx; +#pragma omp atomic compare seq_cst + ucx = ucx < uce ? uce : ucx; +#pragma omp atomic compare seq_cst + ucx = uce > ucx ? uce : ucx; +#pragma omp atomic compare seq_cst + ucx = uce < ucx ? 
uce : ucx; +#pragma omp atomic compare seq_cst + if (ucx > uce) + ucx = uce; +#pragma omp atomic compare seq_cst + if (ucx < uce) + ucx = uce; +#pragma omp atomic compare seq_cst + if (uce > ucx) + ucx = uce; +#pragma omp atomic compare seq_cst + if (uce < ucx) + ucx = uce; + +#pragma omp atomic compare seq_cst + ucx = ucx == uce ? ucd : ucx; +#pragma omp atomic compare seq_cst + ucx = uce == ucx ? ucd : ucx; +#pragma omp atomic compare seq_cst + if (ucx == uce) + ucx = ucd; +#pragma omp atomic compare seq_cst + if (uce == ucx) + ucx = ucd; + +#pragma omp atomic compare + sx = sx > se ? se : sx; +#pragma omp atomic compare + sx = sx < se ? se : sx; +#pragma omp atomic compare + sx = se > sx ? se : sx; +#pragma omp atomic compare + sx = se < sx ? se : sx; +#pragma omp atomic compare + if (sx > se) + sx = se; +#pragma omp atomic compare + if (sx < se) + sx = se; +#pragma omp atomic compare + if (se > sx) + sx = se; +#pragma omp atomic compare + if (se < sx) + sx = se; + +#pragma omp atomic compare + sx = sx == se ? sd : sx; +#pragma omp atomic compare + sx = se == sx ? sd : sx; +#pragma omp atomic compare + if (sx == se) + sx = sd; +#pragma omp atomic compare + if (se == sx) + sx = sd; + +#pragma omp atomic compare + usx = usx > use ? use : usx; +#pragma omp atomic compare + usx = usx < use ? use : usx; +#pragma omp atomic compare + usx = use > usx ? use : usx; +#pragma omp atomic compare + usx = use < usx ? use : usx; +#pragma omp atomic compare + if (usx > use) + usx = use; +#pragma omp atomic compare + if (usx < use) + usx = use; +#pragma omp atomic compare + if (use > usx) + usx = use; +#pragma omp atomic compare + if (use < usx) + usx = use; + +#pragma omp atomic compare + usx = usx == use ? usd : usx; +#pragma omp atomic compare + usx = use == usx ? usd : usx; +#pragma omp atomic compare + if (usx == use) + usx = usd; +#pragma omp atomic compare + if (use == usx) + usx = usd; + +#pragma omp atomic compare acq_rel + sx = sx > se ? 
se : sx; +#pragma omp atomic compare acq_rel + sx = sx < se ? se : sx; +#pragma omp atomic compare acq_rel + sx = se > sx ? se : sx; +#pragma omp atomic compare acq_rel + sx = se < sx ? se : sx; +#pragma omp atomic compare acq_rel + if (sx > se) + sx = se; +#pragma omp atomic compare acq_rel + if (sx < se) + sx = se; +#pragma omp atomic compare acq_rel + if (se > sx) + sx = se; +#pragma omp atomic compare acq_rel + if (se < sx) + sx = se; + +#pragma omp atomic compare acq_rel + sx = sx == se ? sd : sx; +#pragma omp atomic compare acq_rel + sx = se == sx ? sd : sx; +#pragma omp atomic compare acq_rel + if (sx == se) + sx = sd; +#pragma omp atomic compare acq_rel + if (se == sx) + sx = sd; + +#pragma omp atomic compare acq_rel + usx = usx > use ? use : usx; +#pragma omp atomic compare acq_rel + usx = usx < use ? use : usx; +#pragma omp atomic compare acq_rel + usx = use > usx ? use : usx; +#pragma omp atomic compare acq_rel + usx = use < usx ? use : usx; +#pragma omp atomic compare acq_rel + if (usx > use) + usx = use; +#pragma omp atomic compare acq_rel + if (usx < use) + usx = use; +#pragma omp atomic compare acq_rel + if (use > usx) + usx = use; +#pragma omp atomic compare acq_rel + if (use < usx) + usx = use; + +#pragma omp atomic compare acq_rel + usx = usx == use ? usd : usx; +#pragma omp atomic compare acq_rel + usx = use == usx ? usd : usx; +#pragma omp atomic compare acq_rel + if (usx == use) + usx = usd; +#pragma omp atomic compare acq_rel + if (use == usx) + usx = usd; + +#pragma omp atomic compare acquire + sx = sx > se ? se : sx; +#pragma omp atomic compare acquire + sx = sx < se ? se : sx; +#pragma omp atomic compare acquire + sx = se > sx ? se : sx; +#pragma omp atomic compare acquire + sx = se < sx ? 
se : sx; +#pragma omp atomic compare acquire + if (sx > se) + sx = se; +#pragma omp atomic compare acquire + if (sx < se) + sx = se; +#pragma omp atomic compare acquire + if (se > sx) + sx = se; +#pragma omp atomic compare acquire + if (se < sx) + sx = se; + +#pragma omp atomic compare acquire + sx = sx == se ? sd : sx; +#pragma omp atomic compare acquire + sx = se == sx ? sd : sx; +#pragma omp atomic compare acquire + if (sx == se) + sx = sd; +#pragma omp atomic compare acquire + if (se == sx) + sx = sd; + +#pragma omp atomic compare acquire + usx = usx > use ? use : usx; +#pragma omp atomic compare acquire + usx = usx < use ? use : usx; +#pragma omp atomic compare acquire + usx = use > usx ? use : usx; +#pragma omp atomic compare acquire + usx = use < usx ? use : usx; +#pragma omp atomic compare acquire + if (usx > use) + usx = use; +#pragma omp atomic compare acquire + if (usx < use) + usx = use; +#pragma omp atomic compare acquire + if (use > usx) + usx = use; +#pragma omp atomic compare acquire + if (use < usx) + usx = use; + +#pragma omp atomic compare acquire + usx = usx == use ? usd : usx; +#pragma omp atomic compare acquire + usx = use == usx ? usd : usx; +#pragma omp atomic compare acquire + if (usx == use) + usx = usd; +#pragma omp atomic compare acquire + if (use == usx) + usx = usd; + +#pragma omp atomic compare relaxed + sx = sx > se ? se : sx; +#pragma omp atomic compare relaxed + sx = sx < se ? se : sx; +#pragma omp atomic compare relaxed + sx = se > sx ? se : sx; +#pragma omp atomic compare relaxed + sx = se < sx ? se : sx; +#pragma omp atomic compare relaxed + if (sx > se) + sx = se; +#pragma omp atomic compare relaxed + if (sx < se) + sx = se; +#pragma omp atomic compare relaxed + if (se > sx) + sx = se; +#pragma omp atomic compare relaxed + if (se < sx) + sx = se; + +#pragma omp atomic compare relaxed + sx = sx == se ? sd : sx; +#pragma omp atomic compare relaxed + sx = se == sx ? 
sd : sx; +#pragma omp atomic compare relaxed + if (sx == se) + sx = sd; +#pragma omp atomic compare relaxed + if (se == sx) + sx = sd; + +#pragma omp atomic compare relaxed + usx = usx > use ? use : usx; +#pragma omp atomic compare relaxed + usx = usx < use ? use : usx; +#pragma omp atomic compare relaxed + usx = use > usx ? use : usx; +#pragma omp atomic compare relaxed + usx = use < usx ? use : usx; +#pragma omp atomic compare relaxed + if (usx > use) + usx = use; +#pragma omp atomic compare relaxed + if (usx < use) + usx = use; +#pragma omp atomic compare relaxed + if (use > usx) + usx = use; +#pragma omp atomic compare relaxed + if (use < usx) + usx = use; + +#pragma omp atomic compare relaxed + usx = usx == use ? usd : usx; +#pragma omp atomic compare relaxed + usx = use == usx ? usd : usx; +#pragma omp atomic compare relaxed + if (usx == use) + usx = usd; +#pragma omp atomic compare relaxed + if (use == usx) + usx = usd; + +#pragma omp atomic compare release + sx = sx > se ? se : sx; +#pragma omp atomic compare release + sx = sx < se ? se : sx; +#pragma omp atomic compare release + sx = se > sx ? se : sx; +#pragma omp atomic compare release + sx = se < sx ? se : sx; +#pragma omp atomic compare release + if (sx > se) + sx = se; +#pragma omp atomic compare release + if (sx < se) + sx = se; +#pragma omp atomic compare release + if (se > sx) + sx = se; +#pragma omp atomic compare release + if (se < sx) + sx = se; + +#pragma omp atomic compare release + sx = sx == se ? sd : sx; +#pragma omp atomic compare release + sx = se == sx ? sd : sx; +#pragma omp atomic compare release + if (sx == se) + sx = sd; +#pragma omp atomic compare release + if (se == sx) + sx = sd; + +#pragma omp atomic compare release + usx = usx > use ? use : usx; +#pragma omp atomic compare release + usx = usx < use ? use : usx; +#pragma omp atomic compare release + usx = use > usx ? use : usx; +#pragma omp atomic compare release + usx = use < usx ? 
use : usx; +#pragma omp atomic compare release + if (usx > use) + usx = use; +#pragma omp atomic compare release + if (usx < use) + usx = use; +#pragma omp atomic compare release + if (use > usx) + usx = use; +#pragma omp atomic compare release + if (use < usx) + usx = use; + +#pragma omp atomic compare release + usx = usx == use ? usd : usx; +#pragma omp atomic compare release + usx = use == usx ? usd : usx; +#pragma omp atomic compare release + if (usx == use) + usx = usd; +#pragma omp atomic compare release + if (use == usx) + usx = usd; + +#pragma omp atomic compare seq_cst + sx = sx > se ? se : sx; +#pragma omp atomic compare seq_cst + sx = sx < se ? se : sx; +#pragma omp atomic compare seq_cst + sx = se > sx ? se : sx; +#pragma omp atomic compare seq_cst + sx = se < sx ? se : sx; +#pragma omp atomic compare seq_cst + if (sx > se) + sx = se; +#pragma omp atomic compare seq_cst + if (sx < se) + sx = se; +#pragma omp atomic compare seq_cst + if (se > sx) + sx = se; +#pragma omp atomic compare seq_cst + if (se < sx) + sx = se; + +#pragma omp atomic compare seq_cst + sx = sx == se ? sd : sx; +#pragma omp atomic compare seq_cst + sx = se == sx ? sd : sx; +#pragma omp atomic compare seq_cst + if (sx == se) + sx = sd; +#pragma omp atomic compare seq_cst + if (se == sx) + sx = sd; + +#pragma omp atomic compare seq_cst + usx = usx > use ? use : usx; +#pragma omp atomic compare seq_cst + usx = usx < use ? use : usx; +#pragma omp atomic compare seq_cst + usx = use > usx ? use : usx; +#pragma omp atomic compare seq_cst + usx = use < usx ? use : usx; +#pragma omp atomic compare seq_cst + if (usx > use) + usx = use; +#pragma omp atomic compare seq_cst + if (usx < use) + usx = use; +#pragma omp atomic compare seq_cst + if (use > usx) + usx = use; +#pragma omp atomic compare seq_cst + if (use < usx) + usx = use; + +#pragma omp atomic compare seq_cst + usx = usx == use ? usd : usx; +#pragma omp atomic compare seq_cst + usx = use == usx ? 
usd : usx; +#pragma omp atomic compare seq_cst + if (usx == use) + usx = usd; +#pragma omp atomic compare seq_cst + if (use == usx) + usx = usd; + +#pragma omp atomic compare + ix = ix > ie ? ie : ix; +#pragma omp atomic compare + ix = ix < ie ? ie : ix; +#pragma omp atomic compare + ix = ie > ix ? ie : ix; +#pragma omp atomic compare + ix = ie < ix ? ie : ix; +#pragma omp atomic compare + if (ix > ie) + ix = ie; +#pragma omp atomic compare + if (ix < ie) + ix = ie; +#pragma omp atomic compare + if (ie > ix) + ix = ie; +#pragma omp atomic compare + if (ie < ix) + ix = ie; + +#pragma omp atomic compare + ix = ix == ie ? id : ix; +#pragma omp atomic compare + ix = ie == ix ? id : ix; +#pragma omp atomic compare + if (ix == ie) + ix = id; +#pragma omp atomic compare + if (ie == ix) + ix = id; + +#pragma omp atomic compare + uix = uix > uie ? uie : uix; +#pragma omp atomic compare + uix = uix < uie ? uie : uix; +#pragma omp atomic compare + uix = uie > uix ? uie : uix; +#pragma omp atomic compare + uix = uie < uix ? uie : uix; +#pragma omp atomic compare + if (uix > uie) + uix = uie; +#pragma omp atomic compare + if (uix < uie) + uix = uie; +#pragma omp atomic compare + if (uie > uix) + uix = uie; +#pragma omp atomic compare + if (uie < uix) + uix = uie; + +#pragma omp atomic compare + uix = uix == uie ? uid : uix; +#pragma omp atomic compare + uix = uie == uix ? uid : uix; +#pragma omp atomic compare + if (uix == uie) + uix = uid; +#pragma omp atomic compare + if (uie == uix) + uix = uid; + +#pragma omp atomic compare acq_rel + ix = ix > ie ? ie : ix; +#pragma omp atomic compare acq_rel + ix = ix < ie ? ie : ix; +#pragma omp atomic compare acq_rel + ix = ie > ix ? ie : ix; +#pragma omp atomic compare acq_rel + ix = ie < ix ? 
ie : ix; +#pragma omp atomic compare acq_rel + if (ix > ie) + ix = ie; +#pragma omp atomic compare acq_rel + if (ix < ie) + ix = ie; +#pragma omp atomic compare acq_rel + if (ie > ix) + ix = ie; +#pragma omp atomic compare acq_rel + if (ie < ix) + ix = ie; + +#pragma omp atomic compare acq_rel + ix = ix == ie ? id : ix; +#pragma omp atomic compare acq_rel + ix = ie == ix ? id : ix; +#pragma omp atomic compare acq_rel + if (ix == ie) + ix = id; +#pragma omp atomic compare acq_rel + if (ie == ix) + ix = id; + +#pragma omp atomic compare acq_rel + uix = uix > uie ? uie : uix; +#pragma omp atomic compare acq_rel + uix = uix < uie ? uie : uix; +#pragma omp atomic compare acq_rel + uix = uie > uix ? uie : uix; +#pragma omp atomic compare acq_rel + uix = uie < uix ? uie : uix; +#pragma omp atomic compare acq_rel + if (uix > uie) + uix = uie; +#pragma omp atomic compare acq_rel + if (uix < uie) + uix = uie; +#pragma omp atomic compare acq_rel + if (uie > uix) + uix = uie; +#pragma omp atomic compare acq_rel + if (uie < uix) + uix = uie; + +#pragma omp atomic compare acq_rel + uix = uix == uie ? uid : uix; +#pragma omp atomic compare acq_rel + uix = uie == uix ? uid : uix; +#pragma omp atomic compare acq_rel + if (uix == uie) + uix = uid; +#pragma omp atomic compare acq_rel + if (uie == uix) + uix = uid; + +#pragma omp atomic compare acquire + ix = ix > ie ? ie : ix; +#pragma omp atomic compare acquire + ix = ix < ie ? ie : ix; +#pragma omp atomic compare acquire + ix = ie > ix ? ie : ix; +#pragma omp atomic compare acquire + ix = ie < ix ? ie : ix; +#pragma omp atomic compare acquire + if (ix > ie) + ix = ie; +#pragma omp atomic compare acquire + if (ix < ie) + ix = ie; +#pragma omp atomic compare acquire + if (ie > ix) + ix = ie; +#pragma omp atomic compare acquire + if (ie < ix) + ix = ie; + +#pragma omp atomic compare acquire + ix = ix == ie ? id : ix; +#pragma omp atomic compare acquire + ix = ie == ix ? 
id : ix; +#pragma omp atomic compare acquire + if (ix == ie) + ix = id; +#pragma omp atomic compare acquire + if (ie == ix) + ix = id; + +#pragma omp atomic compare acquire + uix = uix > uie ? uie : uix; +#pragma omp atomic compare acquire + uix = uix < uie ? uie : uix; +#pragma omp atomic compare acquire + uix = uie > uix ? uie : uix; +#pragma omp atomic compare acquire + uix = uie < uix ? uie : uix; +#pragma omp atomic compare acquire + if (uix > uie) + uix = uie; +#pragma omp atomic compare acquire + if (uix < uie) + uix = uie; +#pragma omp atomic compare acquire + if (uie > uix) + uix = uie; +#pragma omp atomic compare acquire + if (uie < uix) + uix = uie; + +#pragma omp atomic compare acquire + uix = uix == uie ? uid : uix; +#pragma omp atomic compare acquire + uix = uie == uix ? uid : uix; +#pragma omp atomic compare acquire + if (uix == uie) + uix = uid; +#pragma omp atomic compare acquire + if (uie == uix) + uix = uid; + +#pragma omp atomic compare relaxed + ix = ix > ie ? ie : ix; +#pragma omp atomic compare relaxed + ix = ix < ie ? ie : ix; +#pragma omp atomic compare relaxed + ix = ie > ix ? ie : ix; +#pragma omp atomic compare relaxed + ix = ie < ix ? ie : ix; +#pragma omp atomic compare relaxed + if (ix > ie) + ix = ie; +#pragma omp atomic compare relaxed + if (ix < ie) + ix = ie; +#pragma omp atomic compare relaxed + if (ie > ix) + ix = ie; +#pragma omp atomic compare relaxed + if (ie < ix) + ix = ie; + +#pragma omp atomic compare relaxed + ix = ix == ie ? id : ix; +#pragma omp atomic compare relaxed + ix = ie == ix ? id : ix; +#pragma omp atomic compare relaxed + if (ix == ie) + ix = id; +#pragma omp atomic compare relaxed + if (ie == ix) + ix = id; + +#pragma omp atomic compare relaxed + uix = uix > uie ? uie : uix; +#pragma omp atomic compare relaxed + uix = uix < uie ? uie : uix; +#pragma omp atomic compare relaxed + uix = uie > uix ? uie : uix; +#pragma omp atomic compare relaxed + uix = uie < uix ? 
uie : uix; +#pragma omp atomic compare relaxed + if (uix > uie) + uix = uie; +#pragma omp atomic compare relaxed + if (uix < uie) + uix = uie; +#pragma omp atomic compare relaxed + if (uie > uix) + uix = uie; +#pragma omp atomic compare relaxed + if (uie < uix) + uix = uie; + +#pragma omp atomic compare relaxed + uix = uix == uie ? uid : uix; +#pragma omp atomic compare relaxed + uix = uie == uix ? uid : uix; +#pragma omp atomic compare relaxed + if (uix == uie) + uix = uid; +#pragma omp atomic compare relaxed + if (uie == uix) + uix = uid; + +#pragma omp atomic compare release + ix = ix > ie ? ie : ix; +#pragma omp atomic compare release + ix = ix < ie ? ie : ix; +#pragma omp atomic compare release + ix = ie > ix ? ie : ix; +#pragma omp atomic compare release + ix = ie < ix ? ie : ix; +#pragma omp atomic compare release + if (ix > ie) + ix = ie; +#pragma omp atomic compare release + if (ix < ie) + ix = ie; +#pragma omp atomic compare release + if (ie > ix) + ix = ie; +#pragma omp atomic compare release + if (ie < ix) + ix = ie; + +#pragma omp atomic compare release + ix = ix == ie ? id : ix; +#pragma omp atomic compare release + ix = ie == ix ? id : ix; +#pragma omp atomic compare release + if (ix == ie) + ix = id; +#pragma omp atomic compare release + if (ie == ix) + ix = id; + +#pragma omp atomic compare release + uix = uix > uie ? uie : uix; +#pragma omp atomic compare release + uix = uix < uie ? uie : uix; +#pragma omp atomic compare release + uix = uie > uix ? uie : uix; +#pragma omp atomic compare release + uix = uie < uix ? uie : uix; +#pragma omp atomic compare release + if (uix > uie) + uix = uie; +#pragma omp atomic compare release + if (uix < uie) + uix = uie; +#pragma omp atomic compare release + if (uie > uix) + uix = uie; +#pragma omp atomic compare release + if (uie < uix) + uix = uie; + +#pragma omp atomic compare release + uix = uix == uie ? uid : uix; +#pragma omp atomic compare release + uix = uie == uix ? 
uid : uix; +#pragma omp atomic compare release + if (uix == uie) + uix = uid; +#pragma omp atomic compare release + if (uie == uix) + uix = uid; + +#pragma omp atomic compare seq_cst + ix = ix > ie ? ie : ix; +#pragma omp atomic compare seq_cst + ix = ix < ie ? ie : ix; +#pragma omp atomic compare seq_cst + ix = ie > ix ? ie : ix; +#pragma omp atomic compare seq_cst + ix = ie < ix ? ie : ix; +#pragma omp atomic compare seq_cst + if (ix > ie) + ix = ie; +#pragma omp atomic compare seq_cst + if (ix < ie) + ix = ie; +#pragma omp atomic compare seq_cst + if (ie > ix) + ix = ie; +#pragma omp atomic compare seq_cst + if (ie < ix) + ix = ie; + +#pragma omp atomic compare seq_cst + ix = ix == ie ? id : ix; +#pragma omp atomic compare seq_cst + ix = ie == ix ? id : ix; +#pragma omp atomic compare seq_cst + if (ix == ie) + ix = id; +#pragma omp atomic compare seq_cst + if (ie == ix) + ix = id; + +#pragma omp atomic compare seq_cst + uix = uix > uie ? uie : uix; +#pragma omp atomic compare seq_cst + uix = uix < uie ? uie : uix; +#pragma omp atomic compare seq_cst + uix = uie > uix ? uie : uix; +#pragma omp atomic compare seq_cst + uix = uie < uix ? uie : uix; +#pragma omp atomic compare seq_cst + if (uix > uie) + uix = uie; +#pragma omp atomic compare seq_cst + if (uix < uie) + uix = uie; +#pragma omp atomic compare seq_cst + if (uie > uix) + uix = uie; +#pragma omp atomic compare seq_cst + if (uie < uix) + uix = uie; + +#pragma omp atomic compare seq_cst + uix = uix == uie ? uid : uix; +#pragma omp atomic compare seq_cst + uix = uie == uix ? uid : uix; +#pragma omp atomic compare seq_cst + if (uix == uie) + uix = uid; +#pragma omp atomic compare seq_cst + if (uie == uix) + uix = uid; + +#pragma omp atomic compare + lx = lx > le ? le : lx; +#pragma omp atomic compare + lx = lx < le ? le : lx; +#pragma omp atomic compare + lx = le > lx ? le : lx; +#pragma omp atomic compare + lx = le < lx ? 
le : lx; +#pragma omp atomic compare + if (lx > le) + lx = le; +#pragma omp atomic compare + if (lx < le) + lx = le; +#pragma omp atomic compare + if (le > lx) + lx = le; +#pragma omp atomic compare + if (le < lx) + lx = le; + +#pragma omp atomic compare + lx = lx == le ? ld : lx; +#pragma omp atomic compare + lx = le == lx ? ld : lx; +#pragma omp atomic compare + if (lx == le) + lx = ld; +#pragma omp atomic compare + if (le == lx) + lx = ld; + +#pragma omp atomic compare + ulx = ulx > ule ? ule : ulx; +#pragma omp atomic compare + ulx = ulx < ule ? ule : ulx; +#pragma omp atomic compare + ulx = ule > ulx ? ule : ulx; +#pragma omp atomic compare + ulx = ule < ulx ? ule : ulx; +#pragma omp atomic compare + if (ulx > ule) + ulx = ule; +#pragma omp atomic compare + if (ulx < ule) + ulx = ule; +#pragma omp atomic compare + if (ule > ulx) + ulx = ule; +#pragma omp atomic compare + if (ule < ulx) + ulx = ule; + +#pragma omp atomic compare + ulx = ulx == ule ? uld : ulx; +#pragma omp atomic compare + ulx = ule == ulx ? uld : ulx; +#pragma omp atomic compare + if (ulx == ule) + ulx = uld; +#pragma omp atomic compare + if (ule == ulx) + ulx = uld; + +#pragma omp atomic compare acq_rel + lx = lx > le ? le : lx; +#pragma omp atomic compare acq_rel + lx = lx < le ? le : lx; +#pragma omp atomic compare acq_rel + lx = le > lx ? le : lx; +#pragma omp atomic compare acq_rel + lx = le < lx ? le : lx; +#pragma omp atomic compare acq_rel + if (lx > le) + lx = le; +#pragma omp atomic compare acq_rel + if (lx < le) + lx = le; +#pragma omp atomic compare acq_rel + if (le > lx) + lx = le; +#pragma omp atomic compare acq_rel + if (le < lx) + lx = le; + +#pragma omp atomic compare acq_rel + lx = lx == le ? ld : lx; +#pragma omp atomic compare acq_rel + lx = le == lx ? ld : lx; +#pragma omp atomic compare acq_rel + if (lx == le) + lx = ld; +#pragma omp atomic compare acq_rel + if (le == lx) + lx = ld; + +#pragma omp atomic compare acq_rel + ulx = ulx > ule ? 
ule : ulx; +#pragma omp atomic compare acq_rel + ulx = ulx < ule ? ule : ulx; +#pragma omp atomic compare acq_rel + ulx = ule > ulx ? ule : ulx; +#pragma omp atomic compare acq_rel + ulx = ule < ulx ? ule : ulx; +#pragma omp atomic compare acq_rel + if (ulx > ule) + ulx = ule; +#pragma omp atomic compare acq_rel + if (ulx < ule) + ulx = ule; +#pragma omp atomic compare acq_rel + if (ule > ulx) + ulx = ule; +#pragma omp atomic compare acq_rel + if (ule < ulx) + ulx = ule; + +#pragma omp atomic compare acq_rel + ulx = ulx == ule ? uld : ulx; +#pragma omp atomic compare acq_rel + ulx = ule == ulx ? uld : ulx; +#pragma omp atomic compare acq_rel + if (ulx == ule) + ulx = uld; +#pragma omp atomic compare acq_rel + if (ule == ulx) + ulx = uld; + +#pragma omp atomic compare acquire + lx = lx > le ? le : lx; +#pragma omp atomic compare acquire + lx = lx < le ? le : lx; +#pragma omp atomic compare acquire + lx = le > lx ? le : lx; +#pragma omp atomic compare acquire + lx = le < lx ? le : lx; +#pragma omp atomic compare acquire + if (lx > le) + lx = le; +#pragma omp atomic compare acquire + if (lx < le) + lx = le; +#pragma omp atomic compare acquire + if (le > lx) + lx = le; +#pragma omp atomic compare acquire + if (le < lx) + lx = le; + +#pragma omp atomic compare acquire + lx = lx == le ? ld : lx; +#pragma omp atomic compare acquire + lx = le == lx ? ld : lx; +#pragma omp atomic compare acquire + if (lx == le) + lx = ld; +#pragma omp atomic compare acquire + if (le == lx) + lx = ld; + +#pragma omp atomic compare acquire + ulx = ulx > ule ? ule : ulx; +#pragma omp atomic compare acquire + ulx = ulx < ule ? ule : ulx; +#pragma omp atomic compare acquire + ulx = ule > ulx ? ule : ulx; +#pragma omp atomic compare acquire + ulx = ule < ulx ? 
ule : ulx; +#pragma omp atomic compare acquire + if (ulx > ule) + ulx = ule; +#pragma omp atomic compare acquire + if (ulx < ule) + ulx = ule; +#pragma omp atomic compare acquire + if (ule > ulx) + ulx = ule; +#pragma omp atomic compare acquire + if (ule < ulx) + ulx = ule; + +#pragma omp atomic compare acquire + ulx = ulx == ule ? uld : ulx; +#pragma omp atomic compare acquire + ulx = ule == ulx ? uld : ulx; +#pragma omp atomic compare acquire + if (ulx == ule) + ulx = uld; +#pragma omp atomic compare acquire + if (ule == ulx) + ulx = uld; + +#pragma omp atomic compare relaxed + lx = lx > le ? le : lx; +#pragma omp atomic compare relaxed + lx = lx < le ? le : lx; +#pragma omp atomic compare relaxed + lx = le > lx ? le : lx; +#pragma omp atomic compare relaxed + lx = le < lx ? le : lx; +#pragma omp atomic compare relaxed + if (lx > le) + lx = le; +#pragma omp atomic compare relaxed + if (lx < le) + lx = le; +#pragma omp atomic compare relaxed + if (le > lx) + lx = le; +#pragma omp atomic compare relaxed + if (le < lx) + lx = le; + +#pragma omp atomic compare relaxed + lx = lx == le ? ld : lx; +#pragma omp atomic compare relaxed + lx = le == lx ? ld : lx; +#pragma omp atomic compare relaxed + if (lx == le) + lx = ld; +#pragma omp atomic compare relaxed + if (le == lx) + lx = ld; + +#pragma omp atomic compare relaxed + ulx = ulx > ule ? ule : ulx; +#pragma omp atomic compare relaxed + ulx = ulx < ule ? ule : ulx; +#pragma omp atomic compare relaxed + ulx = ule > ulx ? ule : ulx; +#pragma omp atomic compare relaxed + ulx = ule < ulx ? ule : ulx; +#pragma omp atomic compare relaxed + if (ulx > ule) + ulx = ule; +#pragma omp atomic compare relaxed + if (ulx < ule) + ulx = ule; +#pragma omp atomic compare relaxed + if (ule > ulx) + ulx = ule; +#pragma omp atomic compare relaxed + if (ule < ulx) + ulx = ule; + +#pragma omp atomic compare relaxed + ulx = ulx == ule ? uld : ulx; +#pragma omp atomic compare relaxed + ulx = ule == ulx ? 
uld : ulx; +#pragma omp atomic compare relaxed + if (ulx == ule) + ulx = uld; +#pragma omp atomic compare relaxed + if (ule == ulx) + ulx = uld; + +#pragma omp atomic compare release + lx = lx > le ? le : lx; +#pragma omp atomic compare release + lx = lx < le ? le : lx; +#pragma omp atomic compare release + lx = le > lx ? le : lx; +#pragma omp atomic compare release + lx = le < lx ? le : lx; +#pragma omp atomic compare release + if (lx > le) + lx = le; +#pragma omp atomic compare release + if (lx < le) + lx = le; +#pragma omp atomic compare release + if (le > lx) + lx = le; +#pragma omp atomic compare release + if (le < lx) + lx = le; + +#pragma omp atomic compare release + lx = lx == le ? ld : lx; +#pragma omp atomic compare release + lx = le == lx ? ld : lx; +#pragma omp atomic compare release + if (lx == le) + lx = ld; +#pragma omp atomic compare release + if (le == lx) + lx = ld; + +#pragma omp atomic compare release + ulx = ulx > ule ? ule : ulx; +#pragma omp atomic compare release + ulx = ulx < ule ? ule : ulx; +#pragma omp atomic compare release + ulx = ule > ulx ? ule : ulx; +#pragma omp atomic compare release + ulx = ule < ulx ? ule : ulx; +#pragma omp atomic compare release + if (ulx > ule) + ulx = ule; +#pragma omp atomic compare release + if (ulx < ule) + ulx = ule; +#pragma omp atomic compare release + if (ule > ulx) + ulx = ule; +#pragma omp atomic compare release + if (ule < ulx) + ulx = ule; + +#pragma omp atomic compare release + ulx = ulx == ule ? uld : ulx; +#pragma omp atomic compare release + ulx = ule == ulx ? uld : ulx; +#pragma omp atomic compare release + if (ulx == ule) + ulx = uld; +#pragma omp atomic compare release + if (ule == ulx) + ulx = uld; + +#pragma omp atomic compare seq_cst + lx = lx > le ? le : lx; +#pragma omp atomic compare seq_cst + lx = lx < le ? le : lx; +#pragma omp atomic compare seq_cst + lx = le > lx ? le : lx; +#pragma omp atomic compare seq_cst + lx = le < lx ? 
le : lx; +#pragma omp atomic compare seq_cst + if (lx > le) + lx = le; +#pragma omp atomic compare seq_cst + if (lx < le) + lx = le; +#pragma omp atomic compare seq_cst + if (le > lx) + lx = le; +#pragma omp atomic compare seq_cst + if (le < lx) + lx = le; + +#pragma omp atomic compare seq_cst + lx = lx == le ? ld : lx; +#pragma omp atomic compare seq_cst + lx = le == lx ? ld : lx; +#pragma omp atomic compare seq_cst + if (lx == le) + lx = ld; +#pragma omp atomic compare seq_cst + if (le == lx) + lx = ld; + +#pragma omp atomic compare seq_cst + ulx = ulx > ule ? ule : ulx; +#pragma omp atomic compare seq_cst + ulx = ulx < ule ? ule : ulx; +#pragma omp atomic compare seq_cst + ulx = ule > ulx ? ule : ulx; +#pragma omp atomic compare seq_cst + ulx = ule < ulx ? ule : ulx; +#pragma omp atomic compare seq_cst + if (ulx > ule) + ulx = ule; +#pragma omp atomic compare seq_cst + if (ulx < ule) + ulx = ule; +#pragma omp atomic compare seq_cst + if (ule > ulx) + ulx = ule; +#pragma omp atomic compare seq_cst + if (ule < ulx) + ulx = ule; + +#pragma omp atomic compare seq_cst + ulx = ulx == ule ? uld : ulx; +#pragma omp atomic compare seq_cst + ulx = ule == ulx ? uld : ulx; +#pragma omp atomic compare seq_cst + if (ulx == ule) + ulx = uld; +#pragma omp atomic compare seq_cst + if (ule == ulx) + ulx = uld; + +#pragma omp atomic compare + llx = llx > lle ? lle : llx; +#pragma omp atomic compare + llx = llx < lle ? lle : llx; +#pragma omp atomic compare + llx = lle > llx ? lle : llx; +#pragma omp atomic compare + llx = lle < llx ? lle : llx; +#pragma omp atomic compare + if (llx > lle) + llx = lle; +#pragma omp atomic compare + if (llx < lle) + llx = lle; +#pragma omp atomic compare + if (lle > llx) + llx = lle; +#pragma omp atomic compare + if (lle < llx) + llx = lle; + +#pragma omp atomic compare + llx = llx == lle ? lld : llx; +#pragma omp atomic compare + llx = lle == llx ? 
lld : llx; +#pragma omp atomic compare + if (llx == lle) + llx = lld; +#pragma omp atomic compare + if (lle == llx) + llx = lld; + +#pragma omp atomic compare + ullx = ullx > ulle ? ulle : ullx; +#pragma omp atomic compare + ullx = ullx < ulle ? ulle : ullx; +#pragma omp atomic compare + ullx = ulle > ullx ? ulle : ullx; +#pragma omp atomic compare + ullx = ulle < ullx ? ulle : ullx; +#pragma omp atomic compare + if (ullx > ulle) + ullx = ulle; +#pragma omp atomic compare + if (ullx < ulle) + ullx = ulle; +#pragma omp atomic compare + if (ulle > ullx) + ullx = ulle; +#pragma omp atomic compare + if (ulle < ullx) + ullx = ulle; + +#pragma omp atomic compare + ullx = ullx == ulle ? ulld : ullx; +#pragma omp atomic compare + ullx = ulle == ullx ? ulld : ullx; +#pragma omp atomic compare + if (ullx == ulle) + ullx = ulld; +#pragma omp atomic compare + if (ulle == ullx) + ullx = ulld; + +#pragma omp atomic compare acq_rel + llx = llx > lle ? lle : llx; +#pragma omp atomic compare acq_rel + llx = llx < lle ? lle : llx; +#pragma omp atomic compare acq_rel + llx = lle > llx ? lle : llx; +#pragma omp atomic compare acq_rel + llx = lle < llx ? lle : llx; +#pragma omp atomic compare acq_rel + if (llx > lle) + llx = lle; +#pragma omp atomic compare acq_rel + if (llx < lle) + llx = lle; +#pragma omp atomic compare acq_rel + if (lle > llx) + llx = lle; +#pragma omp atomic compare acq_rel + if (lle < llx) + llx = lle; + +#pragma omp atomic compare acq_rel + llx = llx == lle ? lld : llx; +#pragma omp atomic compare acq_rel + llx = lle == llx ? lld : llx; +#pragma omp atomic compare acq_rel + if (llx == lle) + llx = lld; +#pragma omp atomic compare acq_rel + if (lle == llx) + llx = lld; + +#pragma omp atomic compare acq_rel + ullx = ullx > ulle ? ulle : ullx; +#pragma omp atomic compare acq_rel + ullx = ullx < ulle ? ulle : ullx; +#pragma omp atomic compare acq_rel + ullx = ulle > ullx ? ulle : ullx; +#pragma omp atomic compare acq_rel + ullx = ulle < ullx ? 
ulle : ullx; +#pragma omp atomic compare acq_rel + if (ullx > ulle) + ullx = ulle; +#pragma omp atomic compare acq_rel + if (ullx < ulle) + ullx = ulle; +#pragma omp atomic compare acq_rel + if (ulle > ullx) + ullx = ulle; +#pragma omp atomic compare acq_rel + if (ulle < ullx) + ullx = ulle; + +#pragma omp atomic compare acq_rel + ullx = ullx == ulle ? ulld : ullx; +#pragma omp atomic compare acq_rel + ullx = ulle == ullx ? ulld : ullx; +#pragma omp atomic compare acq_rel + if (ullx == ulle) + ullx = ulld; +#pragma omp atomic compare acq_rel + if (ulle == ullx) + ullx = ulld; + +#pragma omp atomic compare acquire + llx = llx > lle ? lle : llx; +#pragma omp atomic compare acquire + llx = llx < lle ? lle : llx; +#pragma omp atomic compare acquire + llx = lle > llx ? lle : llx; +#pragma omp atomic compare acquire + llx = lle < llx ? lle : llx; +#pragma omp atomic compare acquire + if (llx > lle) + llx = lle; +#pragma omp atomic compare acquire + if (llx < lle) + llx = lle; +#pragma omp atomic compare acquire + if (lle > llx) + llx = lle; +#pragma omp atomic compare acquire + if (lle < llx) + llx = lle; + +#pragma omp atomic compare acquire + llx = llx == lle ? lld : llx; +#pragma omp atomic compare acquire + llx = lle == llx ? lld : llx; +#pragma omp atomic compare acquire + if (llx == lle) + llx = lld; +#pragma omp atomic compare acquire + if (lle == llx) + llx = lld; + +#pragma omp atomic compare acquire + ullx = ullx > ulle ? ulle : ullx; +#pragma omp atomic compare acquire + ullx = ullx < ulle ? ulle : ullx; +#pragma omp atomic compare acquire + ullx = ulle > ullx ? ulle : ullx; +#pragma omp atomic compare acquire + ullx = ulle < ullx ? 
ulle : ullx; +#pragma omp atomic compare acquire + if (ullx > ulle) + ullx = ulle; +#pragma omp atomic compare acquire + if (ullx < ulle) + ullx = ulle; +#pragma omp atomic compare acquire + if (ulle > ullx) + ullx = ulle; +#pragma omp atomic compare acquire + if (ulle < ullx) + ullx = ulle; + +#pragma omp atomic compare acquire + ullx = ullx == ulle ? ulld : ullx; +#pragma omp atomic compare acquire + ullx = ulle == ullx ? ulld : ullx; +#pragma omp atomic compare acquire + if (ullx == ulle) + ullx = ulld; +#pragma omp atomic compare acquire + if (ulle == ullx) + ullx = ulld; + +#pragma omp atomic compare relaxed + llx = llx > lle ? lle : llx; +#pragma omp atomic compare relaxed + llx = llx < lle ? lle : llx; +#pragma omp atomic compare relaxed + llx = lle > llx ? lle : llx; +#pragma omp atomic compare relaxed + llx = lle < llx ? lle : llx; +#pragma omp atomic compare relaxed + if (llx > lle) + llx = lle; +#pragma omp atomic compare relaxed + if (llx < lle) + llx = lle; +#pragma omp atomic compare relaxed + if (lle > llx) + llx = lle; +#pragma omp atomic compare relaxed + if (lle < llx) + llx = lle; + +#pragma omp atomic compare relaxed + llx = llx == lle ? lld : llx; +#pragma omp atomic compare relaxed + llx = lle == llx ? lld : llx; +#pragma omp atomic compare relaxed + if (llx == lle) + llx = lld; +#pragma omp atomic compare relaxed + if (lle == llx) + llx = lld; + +#pragma omp atomic compare relaxed + ullx = ullx > ulle ? ulle : ullx; +#pragma omp atomic compare relaxed + ullx = ullx < ulle ? ulle : ullx; +#pragma omp atomic compare relaxed + ullx = ulle > ullx ? ulle : ullx; +#pragma omp atomic compare relaxed + ullx = ulle < ullx ? 
ulle : ullx; +#pragma omp atomic compare relaxed + if (ullx > ulle) + ullx = ulle; +#pragma omp atomic compare relaxed + if (ullx < ulle) + ullx = ulle; +#pragma omp atomic compare relaxed + if (ulle > ullx) + ullx = ulle; +#pragma omp atomic compare relaxed + if (ulle < ullx) + ullx = ulle; + +#pragma omp atomic compare relaxed + ullx = ullx == ulle ? ulld : ullx; +#pragma omp atomic compare relaxed + ullx = ulle == ullx ? ulld : ullx; +#pragma omp atomic compare relaxed + if (ullx == ulle) + ullx = ulld; +#pragma omp atomic compare relaxed + if (ulle == ullx) + ullx = ulld; + +#pragma omp atomic compare release + llx = llx > lle ? lle : llx; +#pragma omp atomic compare release + llx = llx < lle ? lle : llx; +#pragma omp atomic compare release + llx = lle > llx ? lle : llx; +#pragma omp atomic compare release + llx = lle < llx ? lle : llx; +#pragma omp atomic compare release + if (llx > lle) + llx = lle; +#pragma omp atomic compare release + if (llx < lle) + llx = lle; +#pragma omp atomic compare release + if (lle > llx) + llx = lle; +#pragma omp atomic compare release + if (lle < llx) + llx = lle; + +#pragma omp atomic compare release + llx = llx == lle ? lld : llx; +#pragma omp atomic compare release + llx = lle == llx ? lld : llx; +#pragma omp atomic compare release + if (llx == lle) + llx = lld; +#pragma omp atomic compare release + if (lle == llx) + llx = lld; + +#pragma omp atomic compare release + ullx = ullx > ulle ? ulle : ullx; +#pragma omp atomic compare release + ullx = ullx < ulle ? ulle : ullx; +#pragma omp atomic compare release + ullx = ulle > ullx ? ulle : ullx; +#pragma omp atomic compare release + ullx = ulle < ullx ? 
ulle : ullx; +#pragma omp atomic compare release + if (ullx > ulle) + ullx = ulle; +#pragma omp atomic compare release + if (ullx < ulle) + ullx = ulle; +#pragma omp atomic compare release + if (ulle > ullx) + ullx = ulle; +#pragma omp atomic compare release + if (ulle < ullx) + ullx = ulle; + +#pragma omp atomic compare release + ullx = ullx == ulle ? ulld : ullx; +#pragma omp atomic compare release + ullx = ulle == ullx ? ulld : ullx; +#pragma omp atomic compare release + if (ullx == ulle) + ullx = ulld; +#pragma omp atomic compare release + if (ulle == ullx) + ullx = ulld; + +#pragma omp atomic compare seq_cst + llx = llx > lle ? lle : llx; +#pragma omp atomic compare seq_cst + llx = llx < lle ? lle : llx; +#pragma omp atomic compare seq_cst + llx = lle > llx ? lle : llx; +#pragma omp atomic compare seq_cst + llx = lle < llx ? lle : llx; +#pragma omp atomic compare seq_cst + if (llx > lle) + llx = lle; +#pragma omp atomic compare seq_cst + if (llx < lle) + llx = lle; +#pragma omp atomic compare seq_cst + if (lle > llx) + llx = lle; +#pragma omp atomic compare seq_cst + if (lle < llx) + llx = lle; + +#pragma omp atomic compare seq_cst + llx = llx == lle ? lld : llx; +#pragma omp atomic compare seq_cst + llx = lle == llx ? lld : llx; +#pragma omp atomic compare seq_cst + if (llx == lle) + llx = lld; +#pragma omp atomic compare seq_cst + if (lle == llx) + llx = lld; + +#pragma omp atomic compare seq_cst + ullx = ullx > ulle ? ulle : ullx; +#pragma omp atomic compare seq_cst + ullx = ullx < ulle ? ulle : ullx; +#pragma omp atomic compare seq_cst + ullx = ulle > ullx ? ulle : ullx; +#pragma omp atomic compare seq_cst + ullx = ulle < ullx ? 
ulle : ullx; +#pragma omp atomic compare seq_cst + if (ullx > ulle) + ullx = ulle; +#pragma omp atomic compare seq_cst + if (ullx < ulle) + ullx = ulle; +#pragma omp atomic compare seq_cst + if (ulle > ullx) + ullx = ulle; +#pragma omp atomic compare seq_cst + if (ulle < ullx) + ullx = ulle; + +#pragma omp atomic compare seq_cst + ullx = ullx == ulle ? ulld : ullx; +#pragma omp atomic compare seq_cst + ullx = ulle == ullx ? ulld : ullx; +#pragma omp atomic compare seq_cst + if (ullx == ulle) + ullx = ulld; +#pragma omp atomic compare seq_cst + if (ulle == ullx) + ullx = ulld; +} + +#endif +// CHECK-LABEL: define {{[^@]+}}@foo +// CHECK-SAME: () #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[CX:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[CE:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[CD:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCX:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCE:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCD:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[SX:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[SE:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[SD:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USX:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USE:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USD:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[IX:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[IE:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[ID:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UIX:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UIE:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UID:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[LX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LLX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LLE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: 
[[LLD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP0]] monotonic, align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP2]] monotonic, align 1 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP4]] monotonic, align 1 +// CHECK-NEXT: [[TMP6:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP6]] monotonic, align 1 +// CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP9:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP8]] monotonic, align 1 +// CHECK-NEXT: [[TMP10:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP11:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP10]] monotonic, align 1 +// CHECK-NEXT: [[TMP12:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP13:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP12]] monotonic, align 1 +// CHECK-NEXT: [[TMP14:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP15:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP14]] monotonic, align 1 +// CHECK-NEXT: [[TMP16:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP17:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP18:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP16]], i8 [[TMP17]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP19:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP20:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP21:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP19]], i8 [[TMP20]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP22:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP23:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP24:%.*]] 
= cmpxchg i8* [[CX]], i8 [[TMP22]], i8 [[TMP23]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP25:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP26:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP27:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP25]], i8 [[TMP26]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP28:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP29:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP28]] monotonic, align 1 +// CHECK-NEXT: [[TMP30:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP31:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP30]] monotonic, align 1 +// CHECK-NEXT: [[TMP32:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP33:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP32]] monotonic, align 1 +// CHECK-NEXT: [[TMP34:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP35:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP34]] monotonic, align 1 +// CHECK-NEXT: [[TMP36:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP37:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP36]] monotonic, align 1 +// CHECK-NEXT: [[TMP38:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP39:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP38]] monotonic, align 1 +// CHECK-NEXT: [[TMP40:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP41:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP40]] monotonic, align 1 +// CHECK-NEXT: [[TMP42:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP43:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP42]] monotonic, align 1 +// CHECK-NEXT: [[TMP44:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP45:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP46:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP44]], i8 [[TMP45]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP47:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP48:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP49:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP47]], i8 [[TMP48]] monotonic monotonic, 
align 1 +// CHECK-NEXT: [[TMP50:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP51:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP52:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP50]], i8 [[TMP51]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP53:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP54:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP55:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP53]], i8 [[TMP54]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP56:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP57:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP56]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1:[0-9]+]]) +// CHECK-NEXT: [[TMP58:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP59:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP58]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP60:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP61:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP60]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP62:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP63:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP62]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP64:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP65:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP64]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP66:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP67:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP66]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP68:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP69:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP68]] acq_rel, align 1 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP70:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP71:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP70]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP72:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP73:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP74:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP72]], i8 [[TMP73]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP75:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP76:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP77:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP75]], i8 [[TMP76]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP78:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP79:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP80:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP78]], i8 [[TMP79]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP81:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP82:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP83:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP81]], i8 [[TMP82]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP84:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP85:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP84]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP86:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP87:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP86]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP88:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP89:%.*]] = atomicrmw umax i8* 
[[UCX]], i8 [[TMP88]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP90:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP91:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP90]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP92:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP93:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP92]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP94:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP95:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP94]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP96:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP97:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP96]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP98:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP99:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP98]] acq_rel, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP100:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP101:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP102:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP100]], i8 [[TMP101]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP103:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP104:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP105:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP103]], i8 [[TMP104]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP106:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP107:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP108:%.*]] = cmpxchg i8* [[UCX]], 
i8 [[TMP106]], i8 [[TMP107]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP109:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP110:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP111:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP109]], i8 [[TMP110]] acq_rel acquire, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP112:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP113:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP112]] acquire, align 1 +// CHECK-NEXT: [[TMP114:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP115:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP114]] acquire, align 1 +// CHECK-NEXT: [[TMP116:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP117:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP116]] acquire, align 1 +// CHECK-NEXT: [[TMP118:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP119:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP118]] acquire, align 1 +// CHECK-NEXT: [[TMP120:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP121:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP120]] acquire, align 1 +// CHECK-NEXT: [[TMP122:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP123:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP122]] acquire, align 1 +// CHECK-NEXT: [[TMP124:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP125:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP124]] acquire, align 1 +// CHECK-NEXT: [[TMP126:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP127:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP126]] acquire, align 1 +// CHECK-NEXT: [[TMP128:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP129:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP130:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP128]], i8 [[TMP129]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP131:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP132:%.*]] = load i8, i8* [[CD]], 
align 1 +// CHECK-NEXT: [[TMP133:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP131]], i8 [[TMP132]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP134:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP135:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP136:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP134]], i8 [[TMP135]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP137:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP138:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP139:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP137]], i8 [[TMP138]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP140:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP141:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP140]] acquire, align 1 +// CHECK-NEXT: [[TMP142:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP143:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP142]] acquire, align 1 +// CHECK-NEXT: [[TMP144:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP145:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP144]] acquire, align 1 +// CHECK-NEXT: [[TMP146:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP147:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP146]] acquire, align 1 +// CHECK-NEXT: [[TMP148:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP149:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP148]] acquire, align 1 +// CHECK-NEXT: [[TMP150:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP151:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP150]] acquire, align 1 +// CHECK-NEXT: [[TMP152:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP153:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP152]] acquire, align 1 +// CHECK-NEXT: [[TMP154:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP155:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP154]] acquire, align 1 +// CHECK-NEXT: [[TMP156:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP157:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP158:%.*]] = cmpxchg i8* [[UCX]], i8 
[[TMP156]], i8 [[TMP157]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP159:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP160:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP161:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP159]], i8 [[TMP160]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP162:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP163:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP164:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP162]], i8 [[TMP163]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP165:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP166:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP167:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP165]], i8 [[TMP166]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP168:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP169:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP168]] monotonic, align 1 +// CHECK-NEXT: [[TMP170:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP171:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP170]] monotonic, align 1 +// CHECK-NEXT: [[TMP172:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP173:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP172]] monotonic, align 1 +// CHECK-NEXT: [[TMP174:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP175:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP174]] monotonic, align 1 +// CHECK-NEXT: [[TMP176:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP177:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP176]] monotonic, align 1 +// CHECK-NEXT: [[TMP178:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP179:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP178]] monotonic, align 1 +// CHECK-NEXT: [[TMP180:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP181:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP180]] monotonic, align 1 +// CHECK-NEXT: [[TMP182:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP183:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP182]] monotonic, align 1 +// 
CHECK-NEXT: [[TMP184:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP185:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP186:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP184]], i8 [[TMP185]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP187:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP188:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP189:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP187]], i8 [[TMP188]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP190:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP191:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP192:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP190]], i8 [[TMP191]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP193:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP194:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP195:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP193]], i8 [[TMP194]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP196:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP197:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP196]] monotonic, align 1 +// CHECK-NEXT: [[TMP198:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP199:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP198]] monotonic, align 1 +// CHECK-NEXT: [[TMP200:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP201:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP200]] monotonic, align 1 +// CHECK-NEXT: [[TMP202:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP203:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP202]] monotonic, align 1 +// CHECK-NEXT: [[TMP204:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP205:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP204]] monotonic, align 1 +// CHECK-NEXT: [[TMP206:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP207:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP206]] monotonic, align 1 +// CHECK-NEXT: [[TMP208:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP209:%.*]] = atomicrmw umax i8* 
[[UCX]], i8 [[TMP208]] monotonic, align 1 +// CHECK-NEXT: [[TMP210:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP211:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP210]] monotonic, align 1 +// CHECK-NEXT: [[TMP212:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP213:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP214:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP212]], i8 [[TMP213]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP215:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP216:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP217:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP215]], i8 [[TMP216]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP218:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP219:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP220:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP218]], i8 [[TMP219]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP221:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP222:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP223:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP221]], i8 [[TMP222]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP224:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP225:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP224]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP226:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP227:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP226]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP228:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP229:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP228]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP230:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP231:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP230]] release, align 1 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP232:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP233:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP232]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP234:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP235:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP234]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP236:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP237:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP236]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP238:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP239:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP238]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP240:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP241:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP242:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP240]], i8 [[TMP241]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP243:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP244:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP245:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP243]], i8 [[TMP244]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP246:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP247:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP248:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP246]], i8 [[TMP247]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP249:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP250:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP251:%.*]] = 
cmpxchg i8* [[CX]], i8 [[TMP249]], i8 [[TMP250]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP252:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP253:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP252]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP254:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP255:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP254]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP256:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP257:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP256]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP258:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP259:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP258]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP260:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP261:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP260]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP262:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP263:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP262]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP264:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP265:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP264]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP266:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP267:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP266]] release, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP268:%.*]] = load i8, i8* [[UCE]], align 1 
+// CHECK-NEXT: [[TMP269:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP270:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP268]], i8 [[TMP269]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP271:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP272:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP273:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP271]], i8 [[TMP272]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP274:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP275:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP276:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP274]], i8 [[TMP275]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP277:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP278:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP279:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP277]], i8 [[TMP278]] release monotonic, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP280:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP281:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP280]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP282:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP283:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP282]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP284:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP285:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP284]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP286:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP287:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP286]] seq_cst, align 1 +// CHECK-NEXT: call 
void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP288:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP289:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP288]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP290:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP291:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP290]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP292:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP293:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP292]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP294:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP295:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP294]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP296:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP297:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP298:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP296]], i8 [[TMP297]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP299:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP300:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP301:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP299]], i8 [[TMP300]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP302:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP303:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP304:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP302]], i8 [[TMP303]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP305:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP306:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP307:%.*]] = 
cmpxchg i8* [[CX]], i8 [[TMP305]], i8 [[TMP306]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP308:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP309:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP308]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP310:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP311:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP310]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP312:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP313:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP312]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP314:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP315:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP314]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP316:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP317:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP316]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP318:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP319:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP318]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP320:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP321:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP320]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP322:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP323:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP322]] seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP324:%.*]] = load i8, i8* [[UCE]], align 1 
+// CHECK-NEXT: [[TMP325:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP326:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP324]], i8 [[TMP325]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP327:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP328:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP329:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP327]], i8 [[TMP328]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP330:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP331:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP332:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP330]], i8 [[TMP331]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP333:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP334:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP335:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP333]], i8 [[TMP334]] seq_cst seq_cst, align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP336:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP337:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP336]] monotonic, align 2 +// CHECK-NEXT: [[TMP338:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP339:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP338]] monotonic, align 2 +// CHECK-NEXT: [[TMP340:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP341:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP340]] monotonic, align 2 +// CHECK-NEXT: [[TMP342:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP343:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP342]] monotonic, align 2 +// CHECK-NEXT: [[TMP344:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP345:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP344]] monotonic, align 2 +// CHECK-NEXT: [[TMP346:%.*]] = load i16, i16* [[SE]], 
align 2 +// CHECK-NEXT: [[TMP347:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP346]] monotonic, align 2 +// CHECK-NEXT: [[TMP348:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP349:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP348]] monotonic, align 2 +// CHECK-NEXT: [[TMP350:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP351:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP350]] monotonic, align 2 +// CHECK-NEXT: [[TMP352:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP353:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP354:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP352]], i16 [[TMP353]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP355:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP356:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP357:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP355]], i16 [[TMP356]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP358:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP359:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP360:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP358]], i16 [[TMP359]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP361:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP362:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP363:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP361]], i16 [[TMP362]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP364:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP365:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP364]] monotonic, align 2 +// CHECK-NEXT: [[TMP366:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP367:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP366]] monotonic, align 2 +// CHECK-NEXT: [[TMP368:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP369:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP368]] monotonic, align 2 +// CHECK-NEXT: [[TMP370:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP371:%.*]] = atomicrmw umin i16* [[USX]], 
i16 [[TMP370]] monotonic, align 2 +// CHECK-NEXT: [[TMP372:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP373:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP372]] monotonic, align 2 +// CHECK-NEXT: [[TMP374:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP375:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP374]] monotonic, align 2 +// CHECK-NEXT: [[TMP376:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP377:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP376]] monotonic, align 2 +// CHECK-NEXT: [[TMP378:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP379:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP378]] monotonic, align 2 +// CHECK-NEXT: [[TMP380:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP381:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP382:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP380]], i16 [[TMP381]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP383:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP384:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP385:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP383]], i16 [[TMP384]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP386:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP387:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP388:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP386]], i16 [[TMP387]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP389:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP390:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP391:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP389]], i16 [[TMP390]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP392:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP393:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP392]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP394:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP395:%.*]] = atomicrmw umax 
i16* [[SX]], i16 [[TMP394]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP396:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP397:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP396]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP398:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP399:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP398]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP400:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP401:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP400]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP402:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP403:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP402]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP404:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP405:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP404]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP406:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP407:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP406]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP408:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP409:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP410:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP408]], i16 [[TMP409]] acq_rel acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP411:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP412:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP413:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP411]], i16 [[TMP412]] acq_rel 
acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP414:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP415:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP416:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP414]], i16 [[TMP415]] acq_rel acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP417:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP418:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP419:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP417]], i16 [[TMP418]] acq_rel acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP420:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP421:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP420]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP422:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP423:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP422]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP424:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP425:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP424]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP426:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP427:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP426]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP428:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP429:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP428]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP430:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP431:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP430]] acq_rel, align 2 
+// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP432:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP433:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP432]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP434:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP435:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP434]] acq_rel, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP436:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP437:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP438:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP436]], i16 [[TMP437]] acq_rel acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP439:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP440:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP441:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP439]], i16 [[TMP440]] acq_rel acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP442:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP443:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP444:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP442]], i16 [[TMP443]] acq_rel acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP445:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP446:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP447:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP445]], i16 [[TMP446]] acq_rel acquire, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP448:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP449:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP448]] acquire, align 2 +// CHECK-NEXT: [[TMP450:%.*]] = load i16, i16* [[SE]], align 2 +// 
CHECK-NEXT: [[TMP451:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP450]] acquire, align 2 +// CHECK-NEXT: [[TMP452:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP453:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP452]] acquire, align 2 +// CHECK-NEXT: [[TMP454:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP455:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP454]] acquire, align 2 +// CHECK-NEXT: [[TMP456:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP457:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP456]] acquire, align 2 +// CHECK-NEXT: [[TMP458:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP459:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP458]] acquire, align 2 +// CHECK-NEXT: [[TMP460:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP461:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP460]] acquire, align 2 +// CHECK-NEXT: [[TMP462:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP463:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP462]] acquire, align 2 +// CHECK-NEXT: [[TMP464:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP465:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP466:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP464]], i16 [[TMP465]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP467:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP468:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP469:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP467]], i16 [[TMP468]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP470:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP471:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP472:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP470]], i16 [[TMP471]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP473:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP474:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP475:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP473]], i16 [[TMP474]] acquire acquire, align 2 +// CHECK-NEXT: 
[[TMP476:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP477:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP476]] acquire, align 2 +// CHECK-NEXT: [[TMP478:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP479:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP478]] acquire, align 2 +// CHECK-NEXT: [[TMP480:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP481:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP480]] acquire, align 2 +// CHECK-NEXT: [[TMP482:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP483:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP482]] acquire, align 2 +// CHECK-NEXT: [[TMP484:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP485:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP484]] acquire, align 2 +// CHECK-NEXT: [[TMP486:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP487:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP486]] acquire, align 2 +// CHECK-NEXT: [[TMP488:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP489:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP488]] acquire, align 2 +// CHECK-NEXT: [[TMP490:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP491:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP490]] acquire, align 2 +// CHECK-NEXT: [[TMP492:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP493:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP494:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP492]], i16 [[TMP493]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP495:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP496:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP497:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP495]], i16 [[TMP496]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP498:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP499:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP500:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP498]], i16 [[TMP499]] acquire acquire, align 2 +// CHECK-NEXT: 
[[TMP501:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP502:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP503:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP501]], i16 [[TMP502]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP504:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP505:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP504]] monotonic, align 2 +// CHECK-NEXT: [[TMP506:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP507:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP506]] monotonic, align 2 +// CHECK-NEXT: [[TMP508:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP509:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP508]] monotonic, align 2 +// CHECK-NEXT: [[TMP510:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP511:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP510]] monotonic, align 2 +// CHECK-NEXT: [[TMP512:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP513:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP512]] monotonic, align 2 +// CHECK-NEXT: [[TMP514:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP515:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP514]] monotonic, align 2 +// CHECK-NEXT: [[TMP516:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP517:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP516]] monotonic, align 2 +// CHECK-NEXT: [[TMP518:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP519:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP518]] monotonic, align 2 +// CHECK-NEXT: [[TMP520:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP521:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP522:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP520]], i16 [[TMP521]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP523:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP524:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP525:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP523]], i16 [[TMP524]] monotonic monotonic, align 2 +// CHECK-NEXT: 
[[TMP526:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP527:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP528:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP526]], i16 [[TMP527]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP529:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP530:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP531:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP529]], i16 [[TMP530]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP532:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP533:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP532]] monotonic, align 2 +// CHECK-NEXT: [[TMP534:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP535:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP534]] monotonic, align 2 +// CHECK-NEXT: [[TMP536:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP537:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP536]] monotonic, align 2 +// CHECK-NEXT: [[TMP538:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP539:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP538]] monotonic, align 2 +// CHECK-NEXT: [[TMP540:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP541:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP540]] monotonic, align 2 +// CHECK-NEXT: [[TMP542:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP543:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP542]] monotonic, align 2 +// CHECK-NEXT: [[TMP544:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP545:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP544]] monotonic, align 2 +// CHECK-NEXT: [[TMP546:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP547:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP546]] monotonic, align 2 +// CHECK-NEXT: [[TMP548:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP549:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP550:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP548]], i16 [[TMP549]] monotonic monotonic, align 
2 +// CHECK-NEXT: [[TMP551:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP552:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP553:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP551]], i16 [[TMP552]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP554:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP555:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP556:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP554]], i16 [[TMP555]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP557:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP558:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP559:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP557]], i16 [[TMP558]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP560:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP561:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP560]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP562:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP563:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP562]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP564:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP565:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP564]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP566:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP567:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP566]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP568:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP569:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP568]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP570:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP571:%.*]] = atomicrmw umax i16* [[SX]], i16 
[[TMP570]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP572:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP573:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP572]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP574:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP575:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP574]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP576:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP577:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP578:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP576]], i16 [[TMP577]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP579:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP580:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP581:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP579]], i16 [[TMP580]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP582:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP583:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP584:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP582]], i16 [[TMP583]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP585:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP586:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP587:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP585]], i16 [[TMP586]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP588:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP589:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP588]] release, align 2 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP590:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP591:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP590]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP592:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP593:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP592]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP594:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP595:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP594]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP596:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP597:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP596]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP598:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP599:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP598]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP600:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP601:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP600]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP602:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP603:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP602]] release, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP604:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP605:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP606:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP604]], i16 [[TMP605]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP607:%.*]] = load i16, i16* 
[[USE]], align 2 +// CHECK-NEXT: [[TMP608:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP609:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP607]], i16 [[TMP608]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP610:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP611:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP612:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP610]], i16 [[TMP611]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP613:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP614:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP615:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP613]], i16 [[TMP614]] release monotonic, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP616:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP617:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP616]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP618:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP619:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP618]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP620:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP621:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP620]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP622:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP623:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP622]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP624:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP625:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP624]] seq_cst, align 2 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP626:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP627:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP626]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP628:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP629:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP628]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP630:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP631:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP630]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP632:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP633:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP634:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP632]], i16 [[TMP633]] seq_cst seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP635:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP636:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP637:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP635]], i16 [[TMP636]] seq_cst seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP638:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP639:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP640:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP638]], i16 [[TMP639]] seq_cst seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP641:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP642:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP643:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP641]], i16 [[TMP642]] seq_cst seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP644:%.*]] = load i16, i16* 
[[USE]], align 2 +// CHECK-NEXT: [[TMP645:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP644]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP646:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP647:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP646]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP648:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP649:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP648]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP650:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP651:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP650]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP652:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP653:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP652]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP654:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP655:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP654]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP656:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP657:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP656]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP658:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP659:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP658]] seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP660:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP661:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP662:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP660]], i16 [[TMP661]] seq_cst 
seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP663:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP664:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP665:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP663]], i16 [[TMP664]] seq_cst seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP666:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP667:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP668:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP666]], i16 [[TMP667]] seq_cst seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP669:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP670:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP671:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP669]], i16 [[TMP670]] seq_cst seq_cst, align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP672:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP673:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP672]] monotonic, align 4 +// CHECK-NEXT: [[TMP674:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP675:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP674]] monotonic, align 4 +// CHECK-NEXT: [[TMP676:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP677:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP676]] monotonic, align 4 +// CHECK-NEXT: [[TMP678:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP679:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP678]] monotonic, align 4 +// CHECK-NEXT: [[TMP680:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP681:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP680]] monotonic, align 4 +// CHECK-NEXT: [[TMP682:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP683:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP682]] monotonic, align 4 +// CHECK-NEXT: 
[[TMP684:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP685:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP684]] monotonic, align 4 +// CHECK-NEXT: [[TMP686:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP687:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP686]] monotonic, align 4 +// CHECK-NEXT: [[TMP688:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP689:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP690:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP688]], i32 [[TMP689]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP691:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP692:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP693:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP691]], i32 [[TMP692]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP694:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP695:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP696:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP694]], i32 [[TMP695]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP697:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP698:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP699:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP697]], i32 [[TMP698]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP700:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP701:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP700]] monotonic, align 4 +// CHECK-NEXT: [[TMP702:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP703:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP702]] monotonic, align 4 +// CHECK-NEXT: [[TMP704:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP705:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP704]] monotonic, align 4 +// CHECK-NEXT: [[TMP706:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP707:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP706]] monotonic, align 4 +// CHECK-NEXT: [[TMP708:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: 
[[TMP709:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP708]] monotonic, align 4 +// CHECK-NEXT: [[TMP710:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP711:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP710]] monotonic, align 4 +// CHECK-NEXT: [[TMP712:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP713:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP712]] monotonic, align 4 +// CHECK-NEXT: [[TMP714:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP715:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP714]] monotonic, align 4 +// CHECK-NEXT: [[TMP716:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP717:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP718:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP716]], i32 [[TMP717]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP719:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP720:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP721:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP719]], i32 [[TMP720]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP722:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP723:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP724:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP722]], i32 [[TMP723]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP725:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP726:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP727:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP725]], i32 [[TMP726]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP728:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP729:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP728]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP730:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP731:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP730]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// 
CHECK-NEXT: [[TMP732:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP733:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP732]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP734:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP735:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP734]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP736:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP737:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP736]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP738:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP739:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP738]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP740:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP741:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP740]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP742:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP743:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP742]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP744:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP745:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP746:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP744]], i32 [[TMP745]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP747:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP748:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP749:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP747]], i32 [[TMP748]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP750:%.*]] = 
load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP751:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP752:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP750]], i32 [[TMP751]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP753:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP754:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP755:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP753]], i32 [[TMP754]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP756:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP757:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP756]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP758:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP759:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP758]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP760:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP761:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP760]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP762:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP763:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP762]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP764:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP765:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP764]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP766:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP767:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP766]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP768:%.*]] = load i32, i32* 
[[UIE]], align 4 +// CHECK-NEXT: [[TMP769:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP768]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP770:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP771:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP770]] acq_rel, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP772:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP773:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP774:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP772]], i32 [[TMP773]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP775:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP776:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP777:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP775]], i32 [[TMP776]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP778:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP779:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP780:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP778]], i32 [[TMP779]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP781:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP782:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP783:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP781]], i32 [[TMP782]] acq_rel acquire, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP784:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP785:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP784]] acquire, align 4 +// CHECK-NEXT: [[TMP786:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP787:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP786]] acquire, align 4 +// CHECK-NEXT: [[TMP788:%.*]] = load 
i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP789:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP788]] acquire, align 4 +// CHECK-NEXT: [[TMP790:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP791:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP790]] acquire, align 4 +// CHECK-NEXT: [[TMP792:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP793:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP792]] acquire, align 4 +// CHECK-NEXT: [[TMP794:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP795:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP794]] acquire, align 4 +// CHECK-NEXT: [[TMP796:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP797:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP796]] acquire, align 4 +// CHECK-NEXT: [[TMP798:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP799:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP798]] acquire, align 4 +// CHECK-NEXT: [[TMP800:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP801:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP802:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP800]], i32 [[TMP801]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP803:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP804:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP805:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP803]], i32 [[TMP804]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP806:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP807:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP808:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP806]], i32 [[TMP807]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP809:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP810:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP811:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP809]], i32 [[TMP810]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP812:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP813:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP812]] 
acquire, align 4 +// CHECK-NEXT: [[TMP814:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP815:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP814]] acquire, align 4 +// CHECK-NEXT: [[TMP816:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP817:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP816]] acquire, align 4 +// CHECK-NEXT: [[TMP818:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP819:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP818]] acquire, align 4 +// CHECK-NEXT: [[TMP820:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP821:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP820]] acquire, align 4 +// CHECK-NEXT: [[TMP822:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP823:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP822]] acquire, align 4 +// CHECK-NEXT: [[TMP824:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP825:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP824]] acquire, align 4 +// CHECK-NEXT: [[TMP826:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP827:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP826]] acquire, align 4 +// CHECK-NEXT: [[TMP828:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP829:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP830:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP828]], i32 [[TMP829]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP831:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP832:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP833:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP831]], i32 [[TMP832]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP834:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP835:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP836:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP834]], i32 [[TMP835]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP837:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP838:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: 
[[TMP839:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP837]], i32 [[TMP838]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP840:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP841:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP840]] monotonic, align 4 +// CHECK-NEXT: [[TMP842:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP843:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP842]] monotonic, align 4 +// CHECK-NEXT: [[TMP844:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP845:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP844]] monotonic, align 4 +// CHECK-NEXT: [[TMP846:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP847:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP846]] monotonic, align 4 +// CHECK-NEXT: [[TMP848:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP849:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP848]] monotonic, align 4 +// CHECK-NEXT: [[TMP850:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP851:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP850]] monotonic, align 4 +// CHECK-NEXT: [[TMP852:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP853:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP852]] monotonic, align 4 +// CHECK-NEXT: [[TMP854:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP855:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP854]] monotonic, align 4 +// CHECK-NEXT: [[TMP856:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP857:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP858:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP856]], i32 [[TMP857]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP859:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP860:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP861:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP859]], i32 [[TMP860]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP862:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP863:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: 
[[TMP864:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP862]], i32 [[TMP863]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP865:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP866:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP867:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP865]], i32 [[TMP866]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP868:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP869:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP868]] monotonic, align 4 +// CHECK-NEXT: [[TMP870:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP871:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP870]] monotonic, align 4 +// CHECK-NEXT: [[TMP872:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP873:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP872]] monotonic, align 4 +// CHECK-NEXT: [[TMP874:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP875:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP874]] monotonic, align 4 +// CHECK-NEXT: [[TMP876:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP877:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP876]] monotonic, align 4 +// CHECK-NEXT: [[TMP878:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP879:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP878]] monotonic, align 4 +// CHECK-NEXT: [[TMP880:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP881:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP880]] monotonic, align 4 +// CHECK-NEXT: [[TMP882:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP883:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP882]] monotonic, align 4 +// CHECK-NEXT: [[TMP884:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP885:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP886:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP884]], i32 [[TMP885]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP887:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP888:%.*]] = load i32, i32* [[UID]], 
align 4 +// CHECK-NEXT: [[TMP889:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP887]], i32 [[TMP888]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP890:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP891:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP892:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP890]], i32 [[TMP891]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP893:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP894:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP895:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP893]], i32 [[TMP894]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP896:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP897:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP896]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP898:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP899:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP898]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP900:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP901:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP900]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP902:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP903:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP902]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP904:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP905:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP904]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP906:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP907:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP906]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP908:%.*]] 
= load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP909:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP908]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP910:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP911:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP910]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP912:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP913:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP914:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP912]], i32 [[TMP913]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP915:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP916:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP917:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP915]], i32 [[TMP916]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP918:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP919:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP920:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP918]], i32 [[TMP919]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP921:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP922:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP923:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP921]], i32 [[TMP922]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP924:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP925:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP924]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP926:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP927:%.*]] = atomicrmw umax 
i32* [[UIX]], i32 [[TMP926]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP928:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP929:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP928]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP930:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP931:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP930]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP932:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP933:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP932]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP934:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP935:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP934]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP936:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP937:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP936]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP938:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP939:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP938]] release, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP940:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP941:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP942:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP940]], i32 [[TMP941]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP943:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP944:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP945:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP943]], i32 
[[TMP944]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP946:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP947:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP948:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP946]], i32 [[TMP947]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP949:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP950:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP951:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP949]], i32 [[TMP950]] release monotonic, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP952:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP953:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP952]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP954:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP955:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP954]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP956:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP957:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP956]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP958:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP959:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP958]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP960:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP961:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP960]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP962:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP963:%.*]] = atomicrmw umax i32* [[IX]], i32 
[[TMP962]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP964:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP965:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP964]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP966:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP967:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP966]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP968:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP969:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP970:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP968]], i32 [[TMP969]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP971:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP972:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP973:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP971]], i32 [[TMP972]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP974:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP975:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP976:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP974]], i32 [[TMP975]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP977:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP978:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP979:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP977]], i32 [[TMP978]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP980:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP981:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP980]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* 
@[[GLOB1]]) +// CHECK-NEXT: [[TMP982:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP983:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP982]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP984:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP985:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP984]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP986:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP987:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP986]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP988:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP989:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP988]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP990:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP991:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP990]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP992:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP993:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP992]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP994:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP995:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP994]] seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP996:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP997:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP998:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP996]], i32 [[TMP997]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP999:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: 
[[TMP1000:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP1001:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP999]], i32 [[TMP1000]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1002:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP1003:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP1004:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP1002]], i32 [[TMP1003]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1005:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP1006:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP1007:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP1005]], i32 [[TMP1006]] seq_cst seq_cst, align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1008:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1009:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1008]] monotonic, align 8 +// CHECK-NEXT: [[TMP1010:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1011:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1010]] monotonic, align 8 +// CHECK-NEXT: [[TMP1012:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1013:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1012]] monotonic, align 8 +// CHECK-NEXT: [[TMP1014:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1015:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1014]] monotonic, align 8 +// CHECK-NEXT: [[TMP1016:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1017:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1016]] monotonic, align 8 +// CHECK-NEXT: [[TMP1018:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1019:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1018]] monotonic, align 8 +// CHECK-NEXT: [[TMP1020:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1021:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1020]] monotonic, align 8 +// 
CHECK-NEXT: [[TMP1022:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1023:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1022]] monotonic, align 8 +// CHECK-NEXT: [[TMP1024:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1025:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1026:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1024]], i64 [[TMP1025]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1027:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1028:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1029:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1027]], i64 [[TMP1028]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1030:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1031:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1032:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1030]], i64 [[TMP1031]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1033:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1034:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1035:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1033]], i64 [[TMP1034]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1036:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1037:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1036]] monotonic, align 8 +// CHECK-NEXT: [[TMP1038:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1039:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1038]] monotonic, align 8 +// CHECK-NEXT: [[TMP1040:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1041:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1040]] monotonic, align 8 +// CHECK-NEXT: [[TMP1042:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1043:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1042]] monotonic, align 8 +// CHECK-NEXT: [[TMP1044:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1045:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1044]] monotonic, align 8 +// CHECK-NEXT: [[TMP1046:%.*]] = 
load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1047:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1046]] monotonic, align 8 +// CHECK-NEXT: [[TMP1048:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1049:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1048]] monotonic, align 8 +// CHECK-NEXT: [[TMP1050:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1051:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1050]] monotonic, align 8 +// CHECK-NEXT: [[TMP1052:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1053:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1054:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1052]], i64 [[TMP1053]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1055:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1056:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1057:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1055]], i64 [[TMP1056]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1058:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1059:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1060:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1058]], i64 [[TMP1059]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1061:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1062:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1063:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1061]], i64 [[TMP1062]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1064:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1065:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1064]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1066:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1067:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1066]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1068:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: 
[[TMP1069:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1068]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1070:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1071:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1070]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1072:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1073:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1072]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1074:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1075:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1074]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1076:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1077:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1076]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1078:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1079:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1078]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1080:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1081:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1082:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1080]], i64 [[TMP1081]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1083:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1084:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1085:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1083]], i64 [[TMP1084]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1086:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: 
[[TMP1087:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1088:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1086]], i64 [[TMP1087]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1089:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1090:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1091:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1089]], i64 [[TMP1090]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1092:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1093:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1092]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1094:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1095:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1094]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1096:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1097:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1096]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1098:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1099:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1098]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1100:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1101:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1100]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1102:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1103:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1102]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1104:%.*]] = load i64, i64* [[ULE]], align 8 +// 
CHECK-NEXT: [[TMP1105:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1104]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1106:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1107:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1106]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1108:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1109:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1110:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1108]], i64 [[TMP1109]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1111:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1112:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1113:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1111]], i64 [[TMP1112]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1114:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1115:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1116:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1114]], i64 [[TMP1115]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1117:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1118:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1119:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1117]], i64 [[TMP1118]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1120:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1121:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1120]] acquire, align 8 +// CHECK-NEXT: [[TMP1122:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1123:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1122]] acquire, align 8 +// CHECK-NEXT: 
[[TMP1124:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1125:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1124]] acquire, align 8 +// CHECK-NEXT: [[TMP1126:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1127:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1126]] acquire, align 8 +// CHECK-NEXT: [[TMP1128:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1129:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1128]] acquire, align 8 +// CHECK-NEXT: [[TMP1130:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1131:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1130]] acquire, align 8 +// CHECK-NEXT: [[TMP1132:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1133:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1132]] acquire, align 8 +// CHECK-NEXT: [[TMP1134:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1135:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1134]] acquire, align 8 +// CHECK-NEXT: [[TMP1136:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1137:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1138:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1136]], i64 [[TMP1137]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1139:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1140:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1141:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1139]], i64 [[TMP1140]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1142:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1143:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1144:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1142]], i64 [[TMP1143]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1145:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1146:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1147:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1145]], i64 [[TMP1146]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1148:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: 
[[TMP1149:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1148]] acquire, align 8 +// CHECK-NEXT: [[TMP1150:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1151:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1150]] acquire, align 8 +// CHECK-NEXT: [[TMP1152:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1153:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1152]] acquire, align 8 +// CHECK-NEXT: [[TMP1154:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1155:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1154]] acquire, align 8 +// CHECK-NEXT: [[TMP1156:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1157:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1156]] acquire, align 8 +// CHECK-NEXT: [[TMP1158:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1159:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1158]] acquire, align 8 +// CHECK-NEXT: [[TMP1160:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1161:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1160]] acquire, align 8 +// CHECK-NEXT: [[TMP1162:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1163:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1162]] acquire, align 8 +// CHECK-NEXT: [[TMP1164:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1165:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1166:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1164]], i64 [[TMP1165]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1167:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1168:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1169:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1167]], i64 [[TMP1168]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1170:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1171:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1172:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1170]], i64 [[TMP1171]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1173:%.*]] = load i64, 
i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1174:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1175:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1173]], i64 [[TMP1174]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1176:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1177:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1176]] monotonic, align 8 +// CHECK-NEXT: [[TMP1178:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1179:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1178]] monotonic, align 8 +// CHECK-NEXT: [[TMP1180:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1181:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1180]] monotonic, align 8 +// CHECK-NEXT: [[TMP1182:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1183:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1182]] monotonic, align 8 +// CHECK-NEXT: [[TMP1184:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1185:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1184]] monotonic, align 8 +// CHECK-NEXT: [[TMP1186:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1187:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1186]] monotonic, align 8 +// CHECK-NEXT: [[TMP1188:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1189:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1188]] monotonic, align 8 +// CHECK-NEXT: [[TMP1190:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1191:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1190]] monotonic, align 8 +// CHECK-NEXT: [[TMP1192:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1193:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1194:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1192]], i64 [[TMP1193]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1195:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1196:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1197:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1195]], i64 [[TMP1196]] monotonic monotonic, align 8 +// 
CHECK-NEXT: [[TMP1198:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1199:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1200:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1198]], i64 [[TMP1199]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1201:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1202:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1203:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1201]], i64 [[TMP1202]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1204:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1205:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1204]] monotonic, align 8 +// CHECK-NEXT: [[TMP1206:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1207:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1206]] monotonic, align 8 +// CHECK-NEXT: [[TMP1208:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1209:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1208]] monotonic, align 8 +// CHECK-NEXT: [[TMP1210:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1211:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1210]] monotonic, align 8 +// CHECK-NEXT: [[TMP1212:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1213:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1212]] monotonic, align 8 +// CHECK-NEXT: [[TMP1214:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1215:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1214]] monotonic, align 8 +// CHECK-NEXT: [[TMP1216:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1217:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1216]] monotonic, align 8 +// CHECK-NEXT: [[TMP1218:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1219:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1218]] monotonic, align 8 +// CHECK-NEXT: [[TMP1220:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1221:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1222:%.*]] = cmpxchg i64* [[ULX]], i64 
[[TMP1220]], i64 [[TMP1221]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1223:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1224:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1225:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1223]], i64 [[TMP1224]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1226:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1227:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1228:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1226]], i64 [[TMP1227]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1229:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1230:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1231:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1229]], i64 [[TMP1230]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1232:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1233:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1232]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1234:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1235:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1234]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1236:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1237:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1236]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1238:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1239:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1238]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1240:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1241:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1240]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1242:%.*]] = load i64, 
i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1243:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1242]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1244:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1245:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1244]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1246:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1247:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1246]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1248:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1249:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1250:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1248]], i64 [[TMP1249]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1251:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1252:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1253:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1251]], i64 [[TMP1252]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1254:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1255:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1256:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1254]], i64 [[TMP1255]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1257:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1258:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1259:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1257]], i64 [[TMP1258]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1260:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1261:%.*]] = 
atomicrmw umin i64* [[ULX]], i64 [[TMP1260]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1262:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1263:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1262]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1264:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1265:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1264]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1266:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1267:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1266]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1268:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1269:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1268]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1270:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1271:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1270]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1272:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1273:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1272]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1274:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1275:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1274]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1276:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1277:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1278:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1276]], i64 [[TMP1277]] release monotonic, align 8 +// 
CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1279:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1280:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1281:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1279]], i64 [[TMP1280]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1282:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1283:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1284:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1282]], i64 [[TMP1283]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1285:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1286:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1287:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1285]], i64 [[TMP1286]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1288:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1289:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1288]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1290:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1291:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1290]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1292:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1293:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1292]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1294:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1295:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1294]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1296:%.*]] = load i64, i64* [[LE]], align 8 
+// CHECK-NEXT: [[TMP1297:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1296]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1298:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1299:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1298]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1300:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1301:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP1300]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1302:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1303:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP1302]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1304:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1305:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1306:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1304]], i64 [[TMP1305]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1307:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1308:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1309:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1307]], i64 [[TMP1308]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1310:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1311:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1312:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1310]], i64 [[TMP1311]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1313:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP1314:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP1315:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP1313]], i64 
[[TMP1314]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1316:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1317:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1316]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1318:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1319:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1318]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1320:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1321:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1320]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1322:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1323:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1322]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1324:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1325:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1324]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1326:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1327:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1326]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1328:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1329:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP1328]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1330:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1331:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP1330]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1332:%.*]] = load i64, 
i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1333:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1334:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1332]], i64 [[TMP1333]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1335:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1336:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1337:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1335]], i64 [[TMP1336]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1338:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1339:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1340:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1338]], i64 [[TMP1339]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1341:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP1342:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP1343:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP1341]], i64 [[TMP1342]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1344:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1345:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1344]] monotonic, align 8 +// CHECK-NEXT: [[TMP1346:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1347:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1346]] monotonic, align 8 +// CHECK-NEXT: [[TMP1348:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1349:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1348]] monotonic, align 8 +// CHECK-NEXT: [[TMP1350:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1351:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1350]] monotonic, align 8 +// CHECK-NEXT: [[TMP1352:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1353:%.*]] = atomicrmw umin i64* 
[[LLX]], i64 [[TMP1352]] monotonic, align 8 +// CHECK-NEXT: [[TMP1354:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1355:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1354]] monotonic, align 8 +// CHECK-NEXT: [[TMP1356:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1357:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1356]] monotonic, align 8 +// CHECK-NEXT: [[TMP1358:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1359:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1358]] monotonic, align 8 +// CHECK-NEXT: [[TMP1360:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1361:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1362:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1360]], i64 [[TMP1361]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1363:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1364:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1365:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1363]], i64 [[TMP1364]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1366:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1367:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1368:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1366]], i64 [[TMP1367]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1369:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1370:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1371:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1369]], i64 [[TMP1370]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1372:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1373:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1372]] monotonic, align 8 +// CHECK-NEXT: [[TMP1374:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1375:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1374]] monotonic, align 8 +// CHECK-NEXT: [[TMP1376:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1377:%.*]] = atomicrmw umax i64* [[ULLX]], 
i64 [[TMP1376]] monotonic, align 8 +// CHECK-NEXT: [[TMP1378:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1379:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1378]] monotonic, align 8 +// CHECK-NEXT: [[TMP1380:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1381:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1380]] monotonic, align 8 +// CHECK-NEXT: [[TMP1382:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1383:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1382]] monotonic, align 8 +// CHECK-NEXT: [[TMP1384:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1385:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1384]] monotonic, align 8 +// CHECK-NEXT: [[TMP1386:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1387:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1386]] monotonic, align 8 +// CHECK-NEXT: [[TMP1388:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1389:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1390:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1388]], i64 [[TMP1389]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1391:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1392:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1393:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1391]], i64 [[TMP1392]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1394:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1395:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1396:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1394]], i64 [[TMP1395]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1397:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1398:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1399:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1397]], i64 [[TMP1398]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1400:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1401:%.*]] = atomicrmw umin i64* 
[[LLX]], i64 [[TMP1400]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1402:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1403:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1402]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1404:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1405:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1404]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1406:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1407:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1406]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1408:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1409:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1408]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1410:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1411:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1410]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1412:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1413:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1412]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1414:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1415:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1414]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1416:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1417:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1418:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1416]], i64 [[TMP1417]] acq_rel acquire, align 8 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1419:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1420:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1421:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1419]], i64 [[TMP1420]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1422:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1423:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1424:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1422]], i64 [[TMP1423]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1425:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1426:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1427:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1425]], i64 [[TMP1426]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1428:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1429:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1428]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1430:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1431:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1430]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1432:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1433:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1432]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1434:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1435:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1434]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1436:%.*]] = load i64, i64* [[ULLE]], align 8 +// 
CHECK-NEXT: [[TMP1437:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1436]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1438:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1439:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1438]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1440:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1441:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1440]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1442:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1443:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1442]] acq_rel, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1444:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1445:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1446:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1444]], i64 [[TMP1445]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1447:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1448:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1449:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1447]], i64 [[TMP1448]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1450:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1451:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1452:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1450]], i64 [[TMP1451]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1453:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1454:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1455:%.*]] = cmpxchg i64* 
[[ULLX]], i64 [[TMP1453]], i64 [[TMP1454]] acq_rel acquire, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1456:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1457:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1456]] acquire, align 8 +// CHECK-NEXT: [[TMP1458:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1459:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1458]] acquire, align 8 +// CHECK-NEXT: [[TMP1460:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1461:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1460]] acquire, align 8 +// CHECK-NEXT: [[TMP1462:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1463:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1462]] acquire, align 8 +// CHECK-NEXT: [[TMP1464:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1465:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1464]] acquire, align 8 +// CHECK-NEXT: [[TMP1466:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1467:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1466]] acquire, align 8 +// CHECK-NEXT: [[TMP1468:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1469:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1468]] acquire, align 8 +// CHECK-NEXT: [[TMP1470:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1471:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1470]] acquire, align 8 +// CHECK-NEXT: [[TMP1472:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1473:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1474:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1472]], i64 [[TMP1473]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1475:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1476:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1477:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1475]], i64 [[TMP1476]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1478:%.*]] = load i64, i64* [[LLE]], align 8 +// 
CHECK-NEXT: [[TMP1479:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1480:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1478]], i64 [[TMP1479]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1481:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1482:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1483:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1481]], i64 [[TMP1482]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1484:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1485:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1484]] acquire, align 8 +// CHECK-NEXT: [[TMP1486:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1487:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1486]] acquire, align 8 +// CHECK-NEXT: [[TMP1488:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1489:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1488]] acquire, align 8 +// CHECK-NEXT: [[TMP1490:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1491:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1490]] acquire, align 8 +// CHECK-NEXT: [[TMP1492:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1493:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1492]] acquire, align 8 +// CHECK-NEXT: [[TMP1494:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1495:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1494]] acquire, align 8 +// CHECK-NEXT: [[TMP1496:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1497:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1496]] acquire, align 8 +// CHECK-NEXT: [[TMP1498:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1499:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1498]] acquire, align 8 +// CHECK-NEXT: [[TMP1500:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1501:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1502:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1500]], i64 [[TMP1501]] acquire acquire, align 8 +// CHECK-NEXT: 
[[TMP1503:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1504:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1505:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1503]], i64 [[TMP1504]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1506:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1507:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1508:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1506]], i64 [[TMP1507]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1509:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1510:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1511:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1509]], i64 [[TMP1510]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP1512:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1513:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1512]] monotonic, align 8 +// CHECK-NEXT: [[TMP1514:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1515:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1514]] monotonic, align 8 +// CHECK-NEXT: [[TMP1516:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1517:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1516]] monotonic, align 8 +// CHECK-NEXT: [[TMP1518:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1519:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1518]] monotonic, align 8 +// CHECK-NEXT: [[TMP1520:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1521:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1520]] monotonic, align 8 +// CHECK-NEXT: [[TMP1522:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1523:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1522]] monotonic, align 8 +// CHECK-NEXT: [[TMP1524:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1525:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1524]] monotonic, align 8 +// CHECK-NEXT: [[TMP1526:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1527:%.*]] = atomicrmw umin i64* 
[[LLX]], i64 [[TMP1526]] monotonic, align 8 +// CHECK-NEXT: [[TMP1528:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1529:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1530:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1528]], i64 [[TMP1529]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1531:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1532:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1533:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1531]], i64 [[TMP1532]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1534:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1535:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1536:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1534]], i64 [[TMP1535]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1537:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1538:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1539:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1537]], i64 [[TMP1538]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1540:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1541:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1540]] monotonic, align 8 +// CHECK-NEXT: [[TMP1542:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1543:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1542]] monotonic, align 8 +// CHECK-NEXT: [[TMP1544:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1545:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1544]] monotonic, align 8 +// CHECK-NEXT: [[TMP1546:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1547:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1546]] monotonic, align 8 +// CHECK-NEXT: [[TMP1548:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1549:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1548]] monotonic, align 8 +// CHECK-NEXT: [[TMP1550:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1551:%.*]] = atomicrmw umax i64* 
[[ULLX]], i64 [[TMP1550]] monotonic, align 8 +// CHECK-NEXT: [[TMP1552:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1553:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1552]] monotonic, align 8 +// CHECK-NEXT: [[TMP1554:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1555:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1554]] monotonic, align 8 +// CHECK-NEXT: [[TMP1556:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1557:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1558:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1556]], i64 [[TMP1557]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1559:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1560:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1561:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1559]], i64 [[TMP1560]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1562:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1563:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1564:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1562]], i64 [[TMP1563]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1565:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1566:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1567:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1565]], i64 [[TMP1566]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP1568:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1569:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1568]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1570:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1571:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1570]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1572:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1573:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1572]] 
release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1574:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1575:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1574]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1576:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1577:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1576]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1578:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1579:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1578]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1580:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1581:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1580]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1582:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1583:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1582]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1584:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1585:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1586:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1584]], i64 [[TMP1585]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1587:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1588:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1589:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1587]], i64 [[TMP1588]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1590:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1591:%.*]] = load i64, i64* [[LLD]], 
align 8 +// CHECK-NEXT: [[TMP1592:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1590]], i64 [[TMP1591]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1593:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1594:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1595:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1593]], i64 [[TMP1594]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1596:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1597:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1596]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1598:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1599:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1598]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1600:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1601:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1600]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1602:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1603:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1602]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1604:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1605:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1604]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1606:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1607:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1606]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1608:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: 
[[TMP1609:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1608]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1610:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1611:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1610]] release, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1612:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1613:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1614:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1612]], i64 [[TMP1613]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1615:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1616:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1617:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1615]], i64 [[TMP1616]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1618:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1619:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1620:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1618]], i64 [[TMP1619]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1621:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1622:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1623:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1621]], i64 [[TMP1622]] release monotonic, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1624:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1625:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1624]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1626:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1627:%.*]] = 
atomicrmw umax i64* [[LLX]], i64 [[TMP1626]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1628:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1629:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1628]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1630:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1631:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1630]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1632:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1633:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1632]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1634:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1635:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1634]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1636:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1637:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP1636]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1638:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1639:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP1638]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1640:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1641:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1642:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1640]], i64 [[TMP1641]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1643:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1644:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1645:%.*]] = 
cmpxchg i64* [[LLX]], i64 [[TMP1643]], i64 [[TMP1644]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1646:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1647:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1648:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1646]], i64 [[TMP1647]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1649:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP1650:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP1651:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP1649]], i64 [[TMP1650]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1652:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1653:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1652]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1654:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1655:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1654]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1656:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1657:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1656]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1658:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1659:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1658]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1660:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1661:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1660]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1662:%.*]] = load i64, i64* 
[[ULLE]], align 8 +// CHECK-NEXT: [[TMP1663:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1662]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1664:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1665:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP1664]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1666:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1667:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP1666]] seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1668:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1669:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1670:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1668]], i64 [[TMP1669]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1671:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1672:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1673:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1671]], i64 [[TMP1672]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1674:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1675:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1676:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1674]], i64 [[TMP1675]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1677:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP1678:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP1679:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1677]], i64 [[TMP1678]] seq_cst seq_cst, align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: ret void