diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h
--- a/clang/include/clang/AST/StmtOpenMP.h
+++ b/clang/include/clang/AST/StmtOpenMP.h
@@ -2848,6 +2848,9 @@
     /// This field is 1 for the first(postfix) form of the expression and 0
     /// otherwise.
     uint8_t IsPostfixUpdate : 1;
+    /// 1 if 'v' is updated only when the condition is false (compare capture
+    /// only).
+    uint8_t IsFailOnly : 1;
   } Flags;

   /// Build directive with the given start and end location.
@@ -2872,6 +2875,7 @@
     POS_UpdateExpr,
     POS_D,
     POS_Cond,
+    POS_R,
   };

   /// Set 'x' part of the associated expression/statement.
@@ -2884,6 +2888,8 @@
   }
   /// Set 'v' part of the associated expression/statement.
   void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; }
+  /// Set 'r' part of the associated expression/statement.
+  void setR(Expr *R) { Data->getChildren()[DataPositionTy::POS_R] = R; }
   /// Set 'expr' part of the associated expression/statement.
   void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; }
   /// Set 'd' part of the associated expression/statement.
@@ -2897,6 +2903,8 @@
     Expr *X = nullptr;
     /// 'v' part of the associated expression/statement.
     Expr *V = nullptr;
+    // 'r' part of the associated expression/statement.
+    Expr *R = nullptr;
     /// 'expr' part of the associated expression/statement.
     Expr *E = nullptr;
     /// UE Helper expression of the form:
@@ -2911,6 +2919,9 @@
     bool IsXLHSInRHSPart;
     /// True if original value of 'x' must be stored in 'v', not an updated one.
     bool IsPostfixUpdate;
+    /// True if 'v' is updated only when the condition is false (compare capture
+    /// only).
+    bool IsFailOnly;
   };

   /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
@@ -2963,6 +2974,9 @@
   /// Return true if 'v' expression must be updated to original value of
   /// 'x', false if 'v' must be updated to the new value of 'x'.
   bool isPostfixUpdate() const { return Flags.IsPostfixUpdate; }
+  /// Return true if 'v' is updated only when the condition is evaluated false
+  /// (compare capture only).
+  bool isFailOnly() const { return Flags.IsFailOnly; }
   /// Get 'v' part of the associated expression/statement.
   Expr *getV() {
     return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
@@ -2970,6 +2984,13 @@
   const Expr *getV() const {
     return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
   }
+  /// Get 'r' part of the associated expression/statement.
+  Expr *getR() {
+    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_R]);
+  }
+  const Expr *getR() const {
+    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_R]);
+  }
   /// Get 'expr' part of the associated expression/statement.
   Expr *getExpr() {
     return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]);
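Note (editorial, not part of the patch): the new 'r' child and the IsFailOnly/IsPostfixUpdate flags correspond to the `atomic compare capture` source forms the rest of this patch starts handling. A minimal sketch of the three flavours, using the same naming convention as the test added at the end of the patch (x: atomic location, v: captured value, r: comparison result, e: expected, d: desired); it assumes a compiler invoked with -fopenmp and an OpenMP version that provides the `compare` clause:

```cpp
void forms() {
  int x = 0, v = 0, r = 0, e = 1, d = 2;

// 'v' receives the value 'x' had before the update (IsPostfixUpdate).
#pragma omp atomic compare capture
  {
    v = x;
    if (x == e) {
      x = d;
    }
  }

// 'v' is written only when the comparison fails (IsFailOnly).
#pragma omp atomic compare capture
  if (x == e) {
    x = d;
  } else {
    v = x;
  }

// 'r' receives the result of the comparison, stored in the new POS_R child.
#pragma omp atomic compare capture
  {
    r = x == e;
    if (r) {
      x = d;
    }
  }
}
```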
diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp
--- a/clang/lib/AST/StmtOpenMP.cpp
+++ b/clang/lib/AST/StmtOpenMP.cpp
@@ -877,6 +877,7 @@
   Dir->setCond(Exprs.Cond);
   Dir->Flags.IsXLHSInRHSPart = Exprs.IsXLHSInRHSPart ? 1 : 0;
   Dir->Flags.IsPostfixUpdate = Exprs.IsPostfixUpdate ? 1 : 0;
+  Dir->Flags.IsFailOnly = Exprs.IsFailOnly ? 1 : 0;
   return Dir;
 }

@@ -884,7 +885,7 @@
                                                      unsigned NumClauses,
                                                      EmptyShell) {
   return createEmptyDirective<OMPAtomicDirective>(
-      C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/6);
+      C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/7);
 }

 OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
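Note (editorial, not part of the patch): the NumChildren bump from 6 to 7 matches the POS_R slot added in the header. Judging from the accessors visible above (setX/setV/setExpr and the tail of the enum), the child-slot layout is presumably the following; only POS_R is new, the earlier names are inferred from the accessors rather than quoted from the patch:

```cpp
// Presumed child-slot layout of OMPAtomicDirective after this patch.
enum DataPositionTy {
  POS_X = 0,
  POS_V,
  POS_E,
  POS_UpdateExpr,
  POS_D,
  POS_Cond,
  POS_R, // the 'r' expression of compare capture; null when absent
};
```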
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -6038,8 +6038,10 @@
 static void emitOMPAtomicCompareExpr(CodeGenFunction &CGF,
                                      llvm::AtomicOrdering AO, const Expr *X,
+                                     const Expr *V, const Expr *R,
                                      const Expr *E, const Expr *D,
                                      const Expr *CE, bool IsXBinopExpr,
+                                     bool IsPostfixUpdate, bool IsFailOnly,
                                      SourceLocation Loc) {
   llvm::OpenMPIRBuilder &OMPBuilder =
       CGF.CGM.getOpenMPRuntime().getOMPBuilder();
@@ -6064,22 +6066,38 @@
   Address XAddr = XLVal.getAddress(CGF);
   llvm::Value *EVal = CGF.EmitScalarExpr(E);
   llvm::Value *DVal = D ? CGF.EmitScalarExpr(D) : nullptr;
-
   llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
       XAddr.getPointer(), XAddr.getElementType(),
       X->getType().isVolatileQualified(),
       X->getType()->hasSignedIntegerRepresentation()};

+  llvm::OpenMPIRBuilder::AtomicOpValue VOpVal;
+  if (V) {
+    LValue LV = CGF.EmitLValue(V);
+    Address Addr = LV.getAddress(CGF);
+    VOpVal = {Addr.getPointer(), Addr.getElementType(),
+              V->getType().isVolatileQualified(),
+              V->getType()->hasSignedIntegerRepresentation()};
+  }
+  llvm::OpenMPIRBuilder::AtomicOpValue ROpVal;
+  if (R) {
+    LValue LV = CGF.EmitLValue(R);
+    Address Addr = LV.getAddress(CGF);
+    ROpVal = {Addr.getPointer(), Addr.getElementType(),
+              R->getType().isVolatileQualified(),
+              R->getType()->hasSignedIntegerRepresentation()};
+  }
+
   CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
-      CGF.Builder, XOpVal, EVal, DVal, AO, Op, IsXBinopExpr));
+      CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
+      IsPostfixUpdate, IsFailOnly));
 }

 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                               llvm::AtomicOrdering AO, bool IsPostfixUpdate,
-                              const Expr *X, const Expr *V, const Expr *E,
-                              const Expr *UE, const Expr *D, const Expr *CE,
-                              bool IsXLHSInRHSPart, bool IsCompareCapture,
-                              SourceLocation Loc) {
+                              const Expr *X, const Expr *V, const Expr *R,
+                              const Expr *E, const Expr *UE, const Expr *D,
+                              const Expr *CE, bool IsXLHSInRHSPart,
+                              bool IsFailOnly, SourceLocation Loc) {
   switch (Kind) {
   case OMPC_read:
     emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
@@ -6096,15 +6114,8 @@
                           IsXLHSInRHSPart, Loc);
     break;
   case OMPC_compare: {
-    if (IsCompareCapture) {
-      // Emit an error here.
-      unsigned DiagID = CGF.CGM.getDiags().getCustomDiagID(
-          DiagnosticsEngine::Error,
-          "'atomic compare capture' is not supported for now");
-      CGF.CGM.getDiags().Report(DiagID);
-    } else {
-      emitOMPAtomicCompareExpr(CGF, AO, X, E, D, CE, IsXLHSInRHSPart, Loc);
-    }
+    emitOMPAtomicCompareExpr(CGF, AO, X, V, R, E, D, CE, IsXLHSInRHSPart,
+                             IsPostfixUpdate, IsFailOnly, Loc);
     break;
   }
   case OMPC_if:
@@ -6230,12 +6241,12 @@
       Kind = K;
       KindsEncountered.insert(K);
     }
-  bool IsCompareCapture = false;
+  // We just need to correct Kind here. No need to set a bool saying it is
+  // actually compare capture because we can tell from whether V and R are
+  // nullptr.
   if (KindsEncountered.contains(OMPC_compare) &&
-      KindsEncountered.contains(OMPC_capture)) {
-    IsCompareCapture = true;
+      KindsEncountered.contains(OMPC_capture))
     Kind = OMPC_compare;
-  }
   if (!MemOrderingSpecified) {
     llvm::AtomicOrdering DefaultOrder =
         CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
@@ -6257,8 +6268,9 @@
   LexicalScope Scope(*this, S.getSourceRange());
   EmitStopPoint(S.getAssociatedStmt());
   emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
-                    S.getExpr(), S.getUpdateExpr(), S.getD(), S.getCondExpr(),
-                    S.isXLHSInRHSPart(), IsCompareCapture, S.getBeginLoc());
+                    S.getR(), S.getExpr(), S.getUpdateExpr(), S.getD(),
+                    S.getCondExpr(), S.isXLHSInRHSPart(), S.isFailOnly(),
+                    S.getBeginLoc());
 }

 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
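Note (editorial, not part of the patch): for readers who want the semantics that the codegen above now forwards to OMPBuilder.createAtomicCompare, ignoring memory orderings and the min/max forms, the equality forms behave roughly like the following hand-written model built on std::atomic. This is only a sketch of the intended behaviour under those assumptions, not what the IRBuilder actually emits:

```cpp
#include <atomic>

// Rough model of '#pragma omp atomic compare capture' with an equality test.
// IsPostfixUpdate and IsFailOnly mirror the flags threaded through the
// codegen above; the 'r' and 'v' outputs may be absent in some source forms.
void compareCaptureModel(std::atomic<int> &X, int &V, int &R, int E, int D,
                         bool IsPostfixUpdate, bool IsFailOnly) {
  int Old = E;
  bool Success = X.compare_exchange_strong(Old, D);
  // After the call, Old holds the value X had before the exchange.
  R = Success; // only meaningful for the 'r = x == e;' forms
  if (IsFailOnly) {
    if (!Success)
      V = Old; // 'v' is updated only when the comparison fails
  } else {
    V = IsPostfixUpdate ? Old : (Success ? D : Old); // old vs. new value of x
  }
}
```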
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -11576,6 +11576,7 @@
   Expr *getV() const { return V; }
   Expr *getR() const { return R; }
   bool isFailOnly() const { return IsFailOnly; }
+  bool isPostfixUpdate() const { return IsPostfixUpdate; }

   /// Check if statement \a S is valid for atomic compare capture.
   bool checkStmt(Stmt *S, ErrorInfoTy &ErrorInfo);
@@ -11605,6 +11606,8 @@
   Expr *R = nullptr;
   /// If 'v' is only updated when the comparison fails.
   bool IsFailOnly = false;
+  /// If original value of 'x' must be stored in 'v', not an updated one.
+  bool IsPostfixUpdate = false;
 };

 bool OpenMPAtomicCompareCaptureChecker::checkType(ErrorInfoTy &ErrorInfo) {
@@ -11657,7 +11660,7 @@
   }

   X = BO->getLHS();
-  D = BO->getRHS();
+  D = BO->getRHS()->IgnoreImpCasts();

   auto *Cond = dyn_cast<BinaryOperator>(S->getCond());
   if (!Cond) {
@@ -11674,9 +11677,9 @@
   }

   if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) {
-    E = Cond->getRHS();
+    E = Cond->getRHS()->IgnoreImpCasts();
   } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
-    E = Cond->getLHS();
+    E = Cond->getLHS()->IgnoreImpCasts();
   } else {
     ErrorInfo.Error = ErrorTy::InvalidComparison;
     ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
@@ -11790,7 +11793,7 @@
   }

   X = ThenBO->getLHS();
-  D = ThenBO->getRHS();
+  D = ThenBO->getRHS()->IgnoreImpCasts();

   auto *BO = cast<BinaryOperator>(S1->getRHS()->IgnoreImpCasts());
   if (BO->getOpcode() != BO_EQ) {
@@ -11804,9 +11807,9 @@
   C = BO;

   if (checkIfTwoExprsAreSame(ContextRef, X, BO->getLHS())) {
-    E = BO->getRHS();
+    E = BO->getRHS()->IgnoreImpCasts();
   } else if (checkIfTwoExprsAreSame(ContextRef, X, BO->getRHS())) {
-    E = BO->getLHS();
+    E = BO->getLHS()->IgnoreImpCasts();
   } else {
     ErrorInfo.Error = ErrorTy::InvalidComparison;
     ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = BO->getExprLoc();
@@ -11910,6 +11913,8 @@
     if (dyn_cast<BinaryOperator>(BO->getRHS()->IgnoreImpCasts()) &&
        dyn_cast<IfStmt>(S2))
       return checkForm45(CS, ErrorInfo);
+    // It cannot be set before the check for form45.
+    IsPostfixUpdate = true;
   } else {
     // { cond-update-stmt v = x; }
     UpdateStmt = S2;
@@ -12085,8 +12090,10 @@
   Expr *UE = nullptr;
   Expr *D = nullptr;
   Expr *CE = nullptr;
+  Expr *R = nullptr;
   bool IsXLHSInRHSPart = false;
   bool IsPostfixUpdate = false;
+  bool IsFailOnly = false;
   // OpenMP [2.12.6, atomic Construct]
   // In the next expressions:
   // * x and v (as applicable) are both l-value expressions with scalar type.
@@ -12482,8 +12489,16 @@
           << ErrorInfo.Error << ErrorInfo.NoteRange;
      return StmtError();
    }
-    // TODO: We don't set X, D, E, etc. here because in code gen we will emit
-    // error directly.
+    X = Checker.getX();
+    E = Checker.getE();
+    D = Checker.getD();
+    CE = Checker.getCond();
+    V = Checker.getV();
+    R = Checker.getR();
+    // We reuse IsXLHSInRHSPart to tell if it is in the form 'x ordop expr'.
+    IsXLHSInRHSPart = Checker.isXBinopExpr();
+    IsFailOnly = Checker.isFailOnly();
+    IsPostfixUpdate = Checker.isPostfixUpdate();
   } else {
     OpenMPAtomicCompareChecker::ErrorInfoTy ErrorInfo;
     OpenMPAtomicCompareChecker Checker(*this);
@@ -12491,7 +12506,7 @@
     Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare)
         << ErrorInfo.ErrorRange;
     Diag(ErrorInfo.NoteLoc, diag::note_omp_atomic_compare)
-      << ErrorInfo.Error << ErrorInfo.NoteRange;
+        << ErrorInfo.Error << ErrorInfo.NoteRange;
     return StmtError();
   }
   X = Checker.getX();
@@ -12507,7 +12522,7 @@

   return OMPAtomicDirective::Create(
       Context, StartLoc, EndLoc, Clauses, AStmt,
-      {X, V, E, UE, D, CE, IsXLHSInRHSPart, IsPostfixUpdate});
+      {X, V, R, E, UE, D, CE, IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly});
 }

 StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2451,6 +2451,7 @@
   VisitOMPExecutableDirective(D);
   D->Flags.IsXLHSInRHSPart = Record.readBool() ? 1 : 0;
   D->Flags.IsPostfixUpdate = Record.readBool() ? 1 : 0;
+  D->Flags.IsFailOnly = Record.readBool() ? 1 : 0;
 }

 void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2322,6 +2322,7 @@
   VisitOMPExecutableDirective(D);
   Record.writeBool(D->isXLHSInRHSPart());
   Record.writeBool(D->isPostfixUpdate());
+  Record.writeBool(D->isFailOnly());
   Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE;
 }
diff --git a/clang/test/OpenMP/atomic_compare_codegen.cpp b/clang/test/OpenMP/atomic_compare_codegen.cpp
--- a/clang/test/OpenMP/atomic_compare_codegen.cpp
+++ b/clang/test/OpenMP/atomic_compare_codegen.cpp
@@ -1945,6 +1945,7759 @@
   ullx = ulld;
 }

+void bar() {
+  char cx, cv, cr, ce, cd;
+  unsigned char ucx, ucv, ucr, uce, ucd;
+  short sx, sv, sr, se, sd;
+  unsigned short usx, usv, usr, use, usd;
+  int ix, iv, ir, ie, id;
+  unsigned int uix, uiv, uir, uie, uid;
+  long lx, lv, lr, le, ld;
+  unsigned long ulx, ulv, ulr, ule, uld;
+  long long llx, llv, llr, lle, lld;
+  unsigned long long ullx, ullv, ullr, ulle, ulld;
+
+#pragma omp atomic compare capture
+  {
+    cv = cx;
+    if (ce > cx) {
+      cx = ce;
+    }
+  }
+#pragma omp atomic compare capture
+  {
+    cv = cx;
+    if (cx > ce) {
+      cx = ce;
+    }
+  }
+#pragma omp atomic compare capture
+  {
+    cv = cx;
+    if (ce < cx) {
+      cx = ce;
+    }
+  }
+#pragma omp atomic compare capture
+  {
+    cv = cx;
+    if (cx < ce) {
+      cx = ce;
+    }
+  }
+#pragma omp atomic compare capture
+  {
+    cv = cx;
+    if (cx == ce) {
+      cx = cd;
+    }
+  }
+#pragma omp atomic compare capture
+  {
+    cv = cx;
+    if (ce == cx) {
+      cx = cd;
+    }
+  }
+#pragma omp atomic compare capture
+  {
+    if (ce > cx) {
+      cx = ce;
+    }
+    cv = cx;
+  }
+#pragma omp atomic compare capture
+  {
+    if (cx > ce) {
+      cx = ce;
+    }
+    cv = cx;
+  }
+#pragma omp atomic compare capture
+  {
+    if (ce < cx) {
+      cx = ce;
+    }
+    cv = cx;
+  }
+#pragma omp atomic compare capture
+  {
+    if (cx < ce) {
+      cx = ce;
+    }
+    cv = cx;
+  }
+#pragma omp atomic compare capture
+  {
+    if (cx == ce) {
+      cx = cd;
+    }
+    cv = cx;
+  }
+#pragma omp atomic compare capture
+  {
+
if (ce == cx) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture + if (cx == ce) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture + if (ce == cx) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture + { + cr = cx == ce; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture + { + cr = ce == cx; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture + { + cr = cx == ce; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } +#pragma omp atomic compare capture + { + cr = ce == cx; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } + +#pragma omp atomic compare capture acq_rel + { + cv = cx; + if (ce > cx) { + cx = ce; + } + } +#pragma omp atomic compare capture acq_rel + { + cv = cx; + if (cx > ce) { + cx = ce; + } + } +#pragma omp atomic compare capture acq_rel + { + cv = cx; + if (ce < cx) { + cx = ce; + } + } +#pragma omp atomic compare capture acq_rel + { + cv = cx; + if (cx < ce) { + cx = ce; + } + } +#pragma omp atomic compare capture acq_rel + { + cv = cx; + if (cx == ce) { + cx = cd; + } + } +#pragma omp atomic compare capture acq_rel + { + cv = cx; + if (ce == cx) { + cx = cd; + } + } +#pragma omp atomic compare capture acq_rel + { + if (ce > cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acq_rel + { + if (cx > ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acq_rel + { + if (ce < cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acq_rel + { + if (cx < ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acq_rel + { + if (cx == ce) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture acq_rel + { + if (ce == cx) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture acq_rel + if (cx == ce) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture acq_rel + if (ce == cx) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture acq_rel + { + cr = cx == ce; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture acq_rel + { + cr = ce == cx; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture acq_rel + { + cr = cx == ce; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } +#pragma omp atomic compare capture acq_rel + { + cr = ce == cx; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } + +#pragma omp atomic compare capture acquire + { + cv = cx; + if (ce > cx) { + cx = ce; + } + } +#pragma omp atomic compare capture acquire + { + cv = cx; + if (cx > ce) { + cx = ce; + } + } +#pragma omp atomic compare capture acquire + { + cv = cx; + if (ce < cx) { + cx = ce; + } + } +#pragma omp atomic compare capture acquire + { + cv = cx; + if (cx < ce) { + cx = ce; + } + } +#pragma omp atomic compare capture acquire + { + cv = cx; + if (cx == ce) { + cx = cd; + } + } +#pragma omp atomic compare capture acquire + { + cv = cx; + if (ce == cx) { + cx = cd; + } + } +#pragma omp atomic compare capture acquire + { + if (ce > cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acquire + { + if (cx > ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acquire + { + if (ce < cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acquire + { + if (cx < ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture acquire + { + if (cx == ce) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture acquire + { + if (ce == cx) { + cx = cd; + } + cv = cx; + } +#pragma omp 
atomic compare capture acquire + if (cx == ce) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture acquire + if (ce == cx) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture acquire + { + cr = cx == ce; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture acquire + { + cr = ce == cx; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture acquire + { + cr = cx == ce; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } +#pragma omp atomic compare capture acquire + { + cr = ce == cx; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } + +#pragma omp atomic compare capture relaxed + { + cv = cx; + if (ce > cx) { + cx = ce; + } + } +#pragma omp atomic compare capture relaxed + { + cv = cx; + if (cx > ce) { + cx = ce; + } + } +#pragma omp atomic compare capture relaxed + { + cv = cx; + if (ce < cx) { + cx = ce; + } + } +#pragma omp atomic compare capture relaxed + { + cv = cx; + if (cx < ce) { + cx = ce; + } + } +#pragma omp atomic compare capture relaxed + { + cv = cx; + if (cx == ce) { + cx = cd; + } + } +#pragma omp atomic compare capture relaxed + { + cv = cx; + if (ce == cx) { + cx = cd; + } + } +#pragma omp atomic compare capture relaxed + { + if (ce > cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture relaxed + { + if (cx > ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture relaxed + { + if (ce < cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture relaxed + { + if (cx < ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture relaxed + { + if (cx == ce) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture relaxed + { + if (ce == cx) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture relaxed + if (cx == ce) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture relaxed + if (ce == cx) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture relaxed + { + cr = cx == ce; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture relaxed + { + cr = ce == cx; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture relaxed + { + cr = cx == ce; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } +#pragma omp atomic compare capture relaxed + { + cr = ce == cx; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } + +#pragma omp atomic compare capture release + { + cv = cx; + if (ce > cx) { + cx = ce; + } + } +#pragma omp atomic compare capture release + { + cv = cx; + if (cx > ce) { + cx = ce; + } + } +#pragma omp atomic compare capture release + { + cv = cx; + if (ce < cx) { + cx = ce; + } + } +#pragma omp atomic compare capture release + { + cv = cx; + if (cx < ce) { + cx = ce; + } + } +#pragma omp atomic compare capture release + { + cv = cx; + if (cx == ce) { + cx = cd; + } + } +#pragma omp atomic compare capture release + { + cv = cx; + if (ce == cx) { + cx = cd; + } + } +#pragma omp atomic compare capture release + { + if (ce > cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture release + { + if (cx > ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture release + { + if (ce < cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture release + { + if (cx < ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture release + { + if (cx == ce) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture release + { + if (ce == cx) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic 
compare capture release + if (cx == ce) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture release + if (ce == cx) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture release + { + cr = cx == ce; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture release + { + cr = ce == cx; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture release + { + cr = cx == ce; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } +#pragma omp atomic compare capture release + { + cr = ce == cx; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } + +#pragma omp atomic compare capture seq_cst + { + cv = cx; + if (ce > cx) { + cx = ce; + } + } +#pragma omp atomic compare capture seq_cst + { + cv = cx; + if (cx > ce) { + cx = ce; + } + } +#pragma omp atomic compare capture seq_cst + { + cv = cx; + if (ce < cx) { + cx = ce; + } + } +#pragma omp atomic compare capture seq_cst + { + cv = cx; + if (cx < ce) { + cx = ce; + } + } +#pragma omp atomic compare capture seq_cst + { + cv = cx; + if (cx == ce) { + cx = cd; + } + } +#pragma omp atomic compare capture seq_cst + { + cv = cx; + if (ce == cx) { + cx = cd; + } + } +#pragma omp atomic compare capture seq_cst + { + if (ce > cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture seq_cst + { + if (cx > ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture seq_cst + { + if (ce < cx) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture seq_cst + { + if (cx < ce) { + cx = ce; + } + cv = cx; + } +#pragma omp atomic compare capture seq_cst + { + if (cx == ce) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture seq_cst + { + if (ce == cx) { + cx = cd; + } + cv = cx; + } +#pragma omp atomic compare capture seq_cst + if (cx == ce) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture seq_cst + if (ce == cx) { + cx = cd; + } else { + cv = cx; + } +#pragma omp atomic compare capture seq_cst + { + cr = cx == ce; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture seq_cst + { + cr = ce == cx; + if (cr) { + cx = cd; + } + } +#pragma omp atomic compare capture seq_cst + { + cr = cx == ce; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } +#pragma omp atomic compare capture seq_cst + { + cr = ce == cx; + if (cr) { + cx = cd; + } else { + cv = cx; + } + } + +#pragma omp atomic compare capture + { + ucv = ucx; + if (uce > ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture + { + ucv = ucx; + if (ucx > uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture + { + ucv = ucx; + if (uce < ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture + { + ucv = ucx; + if (ucx < uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture + { + ucv = ucx; + if (ucx == uce) { + ucx = ucd; + } + } +#pragma omp atomic compare capture + { + ucv = ucx; + if (uce == ucx) { + ucx = ucd; + } + } +#pragma omp atomic compare capture + { + if (uce > ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture + { + if (ucx > uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture + { + if (uce < ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture + { + if (ucx < uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture + { + if (ucx == uce) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture + { + if (uce == ucx) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture + if (ucx == uce) 
{ + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture + if (uce == ucx) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } +#pragma omp atomic compare capture + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } + +#pragma omp atomic compare capture acq_rel + { + ucv = ucx; + if (uce > ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture acq_rel + { + ucv = ucx; + if (ucx > uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture acq_rel + { + ucv = ucx; + if (uce < ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture acq_rel + { + ucv = ucx; + if (ucx < uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture acq_rel + { + ucv = ucx; + if (ucx == uce) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acq_rel + { + ucv = ucx; + if (uce == ucx) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acq_rel + { + if (uce > ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + { + if (ucx > uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + { + if (uce < ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + { + if (ucx < uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + { + if (ucx == uce) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + { + if (uce == ucx) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + if (ucx == uce) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + if (uce == ucx) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture acq_rel + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acq_rel + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acq_rel + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } +#pragma omp atomic compare capture acq_rel + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } + +#pragma omp atomic compare capture acquire + { + ucv = ucx; + if (uce > ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture acquire + { + ucv = ucx; + if (ucx > uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture acquire + { + ucv = ucx; + if (uce < ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture acquire + { + ucv = ucx; + if (ucx < uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture acquire + { + ucv = ucx; + if (ucx == uce) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acquire + { + ucv = ucx; + if (uce == ucx) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acquire + { + if (uce > ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acquire + { + if (ucx > uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acquire + { + if (uce < ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acquire + { + if (ucx < uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture acquire + { + if (ucx == uce) { + ucx = ucd; + } + ucv = 
ucx; + } +#pragma omp atomic compare capture acquire + { + if (uce == ucx) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture acquire + if (ucx == uce) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture acquire + if (uce == ucx) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture acquire + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acquire + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture acquire + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } +#pragma omp atomic compare capture acquire + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } + +#pragma omp atomic compare capture relaxed + { + ucv = ucx; + if (uce > ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture relaxed + { + ucv = ucx; + if (ucx > uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture relaxed + { + ucv = ucx; + if (uce < ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture relaxed + { + ucv = ucx; + if (ucx < uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture relaxed + { + ucv = ucx; + if (ucx == uce) { + ucx = ucd; + } + } +#pragma omp atomic compare capture relaxed + { + ucv = ucx; + if (uce == ucx) { + ucx = ucd; + } + } +#pragma omp atomic compare capture relaxed + { + if (uce > ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + { + if (ucx > uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + { + if (uce < ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + { + if (ucx < uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + { + if (ucx == uce) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + { + if (uce == ucx) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + if (ucx == uce) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + if (uce == ucx) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture relaxed + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture relaxed + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture relaxed + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } +#pragma omp atomic compare capture relaxed + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } + +#pragma omp atomic compare capture release + { + ucv = ucx; + if (uce > ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture release + { + ucv = ucx; + if (ucx > uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture release + { + ucv = ucx; + if (uce < ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture release + { + ucv = ucx; + if (ucx < uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture release + { + ucv = ucx; + if (ucx == uce) { + ucx = ucd; + } + } +#pragma omp atomic compare capture release + { + ucv = ucx; + if (uce == ucx) { + ucx = ucd; + } + } +#pragma omp atomic compare capture release + { + if (uce > ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture release + { + if (ucx > uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture release + { + if (uce < ucx) { + ucx = uce; + 
} + ucv = ucx; + } +#pragma omp atomic compare capture release + { + if (ucx < uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture release + { + if (ucx == uce) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture release + { + if (uce == ucx) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture release + if (ucx == uce) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture release + if (uce == ucx) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture release + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture release + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture release + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } +#pragma omp atomic compare capture release + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } + +#pragma omp atomic compare capture seq_cst + { + ucv = ucx; + if (uce > ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture seq_cst + { + ucv = ucx; + if (ucx > uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture seq_cst + { + ucv = ucx; + if (uce < ucx) { + ucx = uce; + } + } +#pragma omp atomic compare capture seq_cst + { + ucv = ucx; + if (ucx < uce) { + ucx = uce; + } + } +#pragma omp atomic compare capture seq_cst + { + ucv = ucx; + if (ucx == uce) { + ucx = ucd; + } + } +#pragma omp atomic compare capture seq_cst + { + ucv = ucx; + if (uce == ucx) { + ucx = ucd; + } + } +#pragma omp atomic compare capture seq_cst + { + if (uce > ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + { + if (ucx > uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + { + if (uce < ucx) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + { + if (ucx < uce) { + ucx = uce; + } + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + { + if (ucx == uce) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + { + if (uce == ucx) { + ucx = ucd; + } + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + if (ucx == uce) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + if (uce == ucx) { + ucx = ucd; + } else { + ucv = ucx; + } +#pragma omp atomic compare capture seq_cst + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture seq_cst + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } + } +#pragma omp atomic compare capture seq_cst + { + ucr = ucx == uce; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } +#pragma omp atomic compare capture seq_cst + { + ucr = uce == ucx; + if (ucr) { + ucx = ucd; + } else { + ucv = ucx; + } + } + +#pragma omp atomic compare capture + { + sv = sx; + if (se > sx) { + sx = se; + } + } +#pragma omp atomic compare capture + { + sv = sx; + if (sx > se) { + sx = se; + } + } +#pragma omp atomic compare capture + { + sv = sx; + if (se < sx) { + sx = se; + } + } +#pragma omp atomic compare capture + { + sv = sx; + if (sx < se) { + sx = se; + } + } +#pragma omp atomic compare capture + { + sv = sx; + if (sx == se) { + sx = sd; + } + } +#pragma omp atomic compare capture + { + sv = sx; + if (se == sx) { + sx = sd; + } + } +#pragma omp atomic compare capture + { + if (se > sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture + { + if (sx > se) { + sx = se; 
+ } + sv = sx; + } +#pragma omp atomic compare capture + { + if (se < sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture + { + if (sx < se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture + { + if (sx == se) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture + { + if (se == sx) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture + if (sx == se) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture + if (se == sx) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture + { + sr = sx == se; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture + { + sr = se == sx; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture + { + sr = sx == se; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } +#pragma omp atomic compare capture + { + sr = se == sx; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } + +#pragma omp atomic compare capture acq_rel + { + sv = sx; + if (se > sx) { + sx = se; + } + } +#pragma omp atomic compare capture acq_rel + { + sv = sx; + if (sx > se) { + sx = se; + } + } +#pragma omp atomic compare capture acq_rel + { + sv = sx; + if (se < sx) { + sx = se; + } + } +#pragma omp atomic compare capture acq_rel + { + sv = sx; + if (sx < se) { + sx = se; + } + } +#pragma omp atomic compare capture acq_rel + { + sv = sx; + if (sx == se) { + sx = sd; + } + } +#pragma omp atomic compare capture acq_rel + { + sv = sx; + if (se == sx) { + sx = sd; + } + } +#pragma omp atomic compare capture acq_rel + { + if (se > sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture acq_rel + { + if (sx > se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture acq_rel + { + if (se < sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture acq_rel + { + if (sx < se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture acq_rel + { + if (sx == se) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture acq_rel + { + if (se == sx) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture acq_rel + if (sx == se) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture acq_rel + if (se == sx) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture acq_rel + { + sr = sx == se; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture acq_rel + { + sr = se == sx; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture acq_rel + { + sr = sx == se; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } +#pragma omp atomic compare capture acq_rel + { + sr = se == sx; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } + +#pragma omp atomic compare capture acquire + { + sv = sx; + if (se > sx) { + sx = se; + } + } +#pragma omp atomic compare capture acquire + { + sv = sx; + if (sx > se) { + sx = se; + } + } +#pragma omp atomic compare capture acquire + { + sv = sx; + if (se < sx) { + sx = se; + } + } +#pragma omp atomic compare capture acquire + { + sv = sx; + if (sx < se) { + sx = se; + } + } +#pragma omp atomic compare capture acquire + { + sv = sx; + if (sx == se) { + sx = sd; + } + } +#pragma omp atomic compare capture acquire + { + sv = sx; + if (se == sx) { + sx = sd; + } + } +#pragma omp atomic compare capture acquire + { + if (se > sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture acquire + { + if (sx > se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture acquire + { + if (se < sx) { + sx = 
se; + } + sv = sx; + } +#pragma omp atomic compare capture acquire + { + if (sx < se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture acquire + { + if (sx == se) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture acquire + { + if (se == sx) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture acquire + if (sx == se) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture acquire + if (se == sx) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture acquire + { + sr = sx == se; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture acquire + { + sr = se == sx; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture acquire + { + sr = sx == se; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } +#pragma omp atomic compare capture acquire + { + sr = se == sx; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } + +#pragma omp atomic compare capture relaxed + { + sv = sx; + if (se > sx) { + sx = se; + } + } +#pragma omp atomic compare capture relaxed + { + sv = sx; + if (sx > se) { + sx = se; + } + } +#pragma omp atomic compare capture relaxed + { + sv = sx; + if (se < sx) { + sx = se; + } + } +#pragma omp atomic compare capture relaxed + { + sv = sx; + if (sx < se) { + sx = se; + } + } +#pragma omp atomic compare capture relaxed + { + sv = sx; + if (sx == se) { + sx = sd; + } + } +#pragma omp atomic compare capture relaxed + { + sv = sx; + if (se == sx) { + sx = sd; + } + } +#pragma omp atomic compare capture relaxed + { + if (se > sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture relaxed + { + if (sx > se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture relaxed + { + if (se < sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture relaxed + { + if (sx < se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture relaxed + { + if (sx == se) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture relaxed + { + if (se == sx) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture relaxed + if (sx == se) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture relaxed + if (se == sx) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture relaxed + { + sr = sx == se; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture relaxed + { + sr = se == sx; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture relaxed + { + sr = sx == se; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } +#pragma omp atomic compare capture relaxed + { + sr = se == sx; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } + +#pragma omp atomic compare capture release + { + sv = sx; + if (se > sx) { + sx = se; + } + } +#pragma omp atomic compare capture release + { + sv = sx; + if (sx > se) { + sx = se; + } + } +#pragma omp atomic compare capture release + { + sv = sx; + if (se < sx) { + sx = se; + } + } +#pragma omp atomic compare capture release + { + sv = sx; + if (sx < se) { + sx = se; + } + } +#pragma omp atomic compare capture release + { + sv = sx; + if (sx == se) { + sx = sd; + } + } +#pragma omp atomic compare capture release + { + sv = sx; + if (se == sx) { + sx = sd; + } + } +#pragma omp atomic compare capture release + { + if (se > sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture release + { + if (sx > se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture release + { + if (se < sx) { + sx = se; + } + 
sv = sx; + } +#pragma omp atomic compare capture release + { + if (sx < se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture release + { + if (sx == se) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture release + { + if (se == sx) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture release + if (sx == se) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture release + if (se == sx) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture release + { + sr = sx == se; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture release + { + sr = se == sx; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture release + { + sr = sx == se; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } +#pragma omp atomic compare capture release + { + sr = se == sx; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } + +#pragma omp atomic compare capture seq_cst + { + sv = sx; + if (se > sx) { + sx = se; + } + } +#pragma omp atomic compare capture seq_cst + { + sv = sx; + if (sx > se) { + sx = se; + } + } +#pragma omp atomic compare capture seq_cst + { + sv = sx; + if (se < sx) { + sx = se; + } + } +#pragma omp atomic compare capture seq_cst + { + sv = sx; + if (sx < se) { + sx = se; + } + } +#pragma omp atomic compare capture seq_cst + { + sv = sx; + if (sx == se) { + sx = sd; + } + } +#pragma omp atomic compare capture seq_cst + { + sv = sx; + if (se == sx) { + sx = sd; + } + } +#pragma omp atomic compare capture seq_cst + { + if (se > sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture seq_cst + { + if (sx > se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture seq_cst + { + if (se < sx) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture seq_cst + { + if (sx < se) { + sx = se; + } + sv = sx; + } +#pragma omp atomic compare capture seq_cst + { + if (sx == se) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture seq_cst + { + if (se == sx) { + sx = sd; + } + sv = sx; + } +#pragma omp atomic compare capture seq_cst + if (sx == se) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture seq_cst + if (se == sx) { + sx = sd; + } else { + sv = sx; + } +#pragma omp atomic compare capture seq_cst + { + sr = sx == se; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture seq_cst + { + sr = se == sx; + if (sr) { + sx = sd; + } + } +#pragma omp atomic compare capture seq_cst + { + sr = sx == se; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } +#pragma omp atomic compare capture seq_cst + { + sr = se == sx; + if (sr) { + sx = sd; + } else { + sv = sx; + } + } + +#pragma omp atomic compare capture + { + usv = usx; + if (use > usx) { + usx = use; + } + } +#pragma omp atomic compare capture + { + usv = usx; + if (usx > use) { + usx = use; + } + } +#pragma omp atomic compare capture + { + usv = usx; + if (use < usx) { + usx = use; + } + } +#pragma omp atomic compare capture + { + usv = usx; + if (usx < use) { + usx = use; + } + } +#pragma omp atomic compare capture + { + usv = usx; + if (usx == use) { + usx = usd; + } + } +#pragma omp atomic compare capture + { + usv = usx; + if (use == usx) { + usx = usd; + } + } +#pragma omp atomic compare capture + { + if (use > usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture + { + if (usx > use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture + { + if (use < usx) { + usx = use; + } + usv = usx; + } +#pragma omp 
atomic compare capture + { + if (usx < use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture + { + if (usx == use) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture + { + if (use == usx) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture + if (usx == use) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture + if (use == usx) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture + { + usr = usx == use; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture + { + usr = use == usx; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture + { + usr = usx == use; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } +#pragma omp atomic compare capture + { + usr = use == usx; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } + +#pragma omp atomic compare capture acq_rel + { + usv = usx; + if (use > usx) { + usx = use; + } + } +#pragma omp atomic compare capture acq_rel + { + usv = usx; + if (usx > use) { + usx = use; + } + } +#pragma omp atomic compare capture acq_rel + { + usv = usx; + if (use < usx) { + usx = use; + } + } +#pragma omp atomic compare capture acq_rel + { + usv = usx; + if (usx < use) { + usx = use; + } + } +#pragma omp atomic compare capture acq_rel + { + usv = usx; + if (usx == use) { + usx = usd; + } + } +#pragma omp atomic compare capture acq_rel + { + usv = usx; + if (use == usx) { + usx = usd; + } + } +#pragma omp atomic compare capture acq_rel + { + if (use > usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture acq_rel + { + if (usx > use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture acq_rel + { + if (use < usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture acq_rel + { + if (usx < use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture acq_rel + { + if (usx == use) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture acq_rel + { + if (use == usx) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture acq_rel + if (usx == use) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture acq_rel + if (use == usx) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture acq_rel + { + usr = usx == use; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture acq_rel + { + usr = use == usx; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture acq_rel + { + usr = usx == use; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } +#pragma omp atomic compare capture acq_rel + { + usr = use == usx; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } + +#pragma omp atomic compare capture acquire + { + usv = usx; + if (use > usx) { + usx = use; + } + } +#pragma omp atomic compare capture acquire + { + usv = usx; + if (usx > use) { + usx = use; + } + } +#pragma omp atomic compare capture acquire + { + usv = usx; + if (use < usx) { + usx = use; + } + } +#pragma omp atomic compare capture acquire + { + usv = usx; + if (usx < use) { + usx = use; + } + } +#pragma omp atomic compare capture acquire + { + usv = usx; + if (usx == use) { + usx = usd; + } + } +#pragma omp atomic compare capture acquire + { + usv = usx; + if (use == usx) { + usx = usd; + } + } +#pragma omp atomic compare capture acquire + { + if (use > usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture acquire + { + if (usx > use) { + usx 
= use; + } + usv = usx; + } +#pragma omp atomic compare capture acquire + { + if (use < usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture acquire + { + if (usx < use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture acquire + { + if (usx == use) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture acquire + { + if (use == usx) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture acquire + if (usx == use) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture acquire + if (use == usx) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture acquire + { + usr = usx == use; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture acquire + { + usr = use == usx; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture acquire + { + usr = usx == use; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } +#pragma omp atomic compare capture acquire + { + usr = use == usx; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } + +#pragma omp atomic compare capture relaxed + { + usv = usx; + if (use > usx) { + usx = use; + } + } +#pragma omp atomic compare capture relaxed + { + usv = usx; + if (usx > use) { + usx = use; + } + } +#pragma omp atomic compare capture relaxed + { + usv = usx; + if (use < usx) { + usx = use; + } + } +#pragma omp atomic compare capture relaxed + { + usv = usx; + if (usx < use) { + usx = use; + } + } +#pragma omp atomic compare capture relaxed + { + usv = usx; + if (usx == use) { + usx = usd; + } + } +#pragma omp atomic compare capture relaxed + { + usv = usx; + if (use == usx) { + usx = usd; + } + } +#pragma omp atomic compare capture relaxed + { + if (use > usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture relaxed + { + if (usx > use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture relaxed + { + if (use < usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture relaxed + { + if (usx < use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture relaxed + { + if (usx == use) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture relaxed + { + if (use == usx) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture relaxed + if (usx == use) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture relaxed + if (use == usx) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture relaxed + { + usr = usx == use; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture relaxed + { + usr = use == usx; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture relaxed + { + usr = usx == use; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } +#pragma omp atomic compare capture relaxed + { + usr = use == usx; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } + +#pragma omp atomic compare capture release + { + usv = usx; + if (use > usx) { + usx = use; + } + } +#pragma omp atomic compare capture release + { + usv = usx; + if (usx > use) { + usx = use; + } + } +#pragma omp atomic compare capture release + { + usv = usx; + if (use < usx) { + usx = use; + } + } +#pragma omp atomic compare capture release + { + usv = usx; + if (usx < use) { + usx = use; + } + } +#pragma omp atomic compare capture release + { + usv = usx; + if (usx == use) { + usx = usd; + } + } +#pragma omp atomic compare capture release + { + usv = usx; + 
if (use == usx) { + usx = usd; + } + } +#pragma omp atomic compare capture release + { + if (use > usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture release + { + if (usx > use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture release + { + if (use < usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture release + { + if (usx < use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture release + { + if (usx == use) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture release + { + if (use == usx) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture release + if (usx == use) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture release + if (use == usx) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture release + { + usr = usx == use; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture release + { + usr = use == usx; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture release + { + usr = usx == use; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } +#pragma omp atomic compare capture release + { + usr = use == usx; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } + +#pragma omp atomic compare capture seq_cst + { + usv = usx; + if (use > usx) { + usx = use; + } + } +#pragma omp atomic compare capture seq_cst + { + usv = usx; + if (usx > use) { + usx = use; + } + } +#pragma omp atomic compare capture seq_cst + { + usv = usx; + if (use < usx) { + usx = use; + } + } +#pragma omp atomic compare capture seq_cst + { + usv = usx; + if (usx < use) { + usx = use; + } + } +#pragma omp atomic compare capture seq_cst + { + usv = usx; + if (usx == use) { + usx = usd; + } + } +#pragma omp atomic compare capture seq_cst + { + usv = usx; + if (use == usx) { + usx = usd; + } + } +#pragma omp atomic compare capture seq_cst + { + if (use > usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture seq_cst + { + if (usx > use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture seq_cst + { + if (use < usx) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture seq_cst + { + if (usx < use) { + usx = use; + } + usv = usx; + } +#pragma omp atomic compare capture seq_cst + { + if (usx == use) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture seq_cst + { + if (use == usx) { + usx = usd; + } + usv = usx; + } +#pragma omp atomic compare capture seq_cst + if (usx == use) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture seq_cst + if (use == usx) { + usx = usd; + } else { + usv = usx; + } +#pragma omp atomic compare capture seq_cst + { + usr = usx == use; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture seq_cst + { + usr = use == usx; + if (usr) { + usx = usd; + } + } +#pragma omp atomic compare capture seq_cst + { + usr = usx == use; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } +#pragma omp atomic compare capture seq_cst + { + usr = use == usx; + if (usr) { + usx = usd; + } else { + usv = usx; + } + } + +#pragma omp atomic compare capture + { + iv = ix; + if (ie > ix) { + ix = ie; + } + } +#pragma omp atomic compare capture + { + iv = ix; + if (ix > ie) { + ix = ie; + } + } +#pragma omp atomic compare capture + { + iv = ix; + if (ie < ix) { + ix = ie; + } + } +#pragma omp atomic compare capture + { + iv = ix; + if (ix < ie) { + ix = ie; + } + } 
+#pragma omp atomic compare capture + { + iv = ix; + if (ix == ie) { + ix = id; + } + } +#pragma omp atomic compare capture + { + iv = ix; + if (ie == ix) { + ix = id; + } + } +#pragma omp atomic compare capture + { + if (ie > ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture + { + if (ix > ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture + { + if (ie < ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture + { + if (ix < ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture + { + if (ix == ie) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture + { + if (ie == ix) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture + if (ix == ie) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture + if (ie == ix) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture + { + ir = ix == ie; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture + { + ir = ie == ix; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture + { + ir = ix == ie; + if (ir) { + ix = id; + } else { + iv = ix; + } + } +#pragma omp atomic compare capture + { + ir = ie == ix; + if (ir) { + ix = id; + } else { + iv = ix; + } + } + +#pragma omp atomic compare capture acq_rel + { + iv = ix; + if (ie > ix) { + ix = ie; + } + } +#pragma omp atomic compare capture acq_rel + { + iv = ix; + if (ix > ie) { + ix = ie; + } + } +#pragma omp atomic compare capture acq_rel + { + iv = ix; + if (ie < ix) { + ix = ie; + } + } +#pragma omp atomic compare capture acq_rel + { + iv = ix; + if (ix < ie) { + ix = ie; + } + } +#pragma omp atomic compare capture acq_rel + { + iv = ix; + if (ix == ie) { + ix = id; + } + } +#pragma omp atomic compare capture acq_rel + { + iv = ix; + if (ie == ix) { + ix = id; + } + } +#pragma omp atomic compare capture acq_rel + { + if (ie > ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acq_rel + { + if (ix > ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acq_rel + { + if (ie < ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acq_rel + { + if (ix < ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acq_rel + { + if (ix == ie) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture acq_rel + { + if (ie == ix) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture acq_rel + if (ix == ie) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture acq_rel + if (ie == ix) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture acq_rel + { + ir = ix == ie; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture acq_rel + { + ir = ie == ix; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture acq_rel + { + ir = ix == ie; + if (ir) { + ix = id; + } else { + iv = ix; + } + } +#pragma omp atomic compare capture acq_rel + { + ir = ie == ix; + if (ir) { + ix = id; + } else { + iv = ix; + } + } + +#pragma omp atomic compare capture acquire + { + iv = ix; + if (ie > ix) { + ix = ie; + } + } +#pragma omp atomic compare capture acquire + { + iv = ix; + if (ix > ie) { + ix = ie; + } + } +#pragma omp atomic compare capture acquire + { + iv = ix; + if (ie < ix) { + ix = ie; + } + } +#pragma omp atomic compare capture acquire + { + iv = ix; + if (ix < ie) { + ix = ie; + } + } +#pragma omp atomic compare capture acquire + { + iv = ix; + if (ix == ie) { + ix = id; + } + } +#pragma omp atomic 
compare capture acquire + { + iv = ix; + if (ie == ix) { + ix = id; + } + } +#pragma omp atomic compare capture acquire + { + if (ie > ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acquire + { + if (ix > ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acquire + { + if (ie < ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acquire + { + if (ix < ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture acquire + { + if (ix == ie) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture acquire + { + if (ie == ix) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture acquire + if (ix == ie) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture acquire + if (ie == ix) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture acquire + { + ir = ix == ie; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture acquire + { + ir = ie == ix; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture acquire + { + ir = ix == ie; + if (ir) { + ix = id; + } else { + iv = ix; + } + } +#pragma omp atomic compare capture acquire + { + ir = ie == ix; + if (ir) { + ix = id; + } else { + iv = ix; + } + } + +#pragma omp atomic compare capture relaxed + { + iv = ix; + if (ie > ix) { + ix = ie; + } + } +#pragma omp atomic compare capture relaxed + { + iv = ix; + if (ix > ie) { + ix = ie; + } + } +#pragma omp atomic compare capture relaxed + { + iv = ix; + if (ie < ix) { + ix = ie; + } + } +#pragma omp atomic compare capture relaxed + { + iv = ix; + if (ix < ie) { + ix = ie; + } + } +#pragma omp atomic compare capture relaxed + { + iv = ix; + if (ix == ie) { + ix = id; + } + } +#pragma omp atomic compare capture relaxed + { + iv = ix; + if (ie == ix) { + ix = id; + } + } +#pragma omp atomic compare capture relaxed + { + if (ie > ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture relaxed + { + if (ix > ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture relaxed + { + if (ie < ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture relaxed + { + if (ix < ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture relaxed + { + if (ix == ie) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture relaxed + { + if (ie == ix) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture relaxed + if (ix == ie) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture relaxed + if (ie == ix) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture relaxed + { + ir = ix == ie; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture relaxed + { + ir = ie == ix; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture relaxed + { + ir = ix == ie; + if (ir) { + ix = id; + } else { + iv = ix; + } + } +#pragma omp atomic compare capture relaxed + { + ir = ie == ix; + if (ir) { + ix = id; + } else { + iv = ix; + } + } + +#pragma omp atomic compare capture release + { + iv = ix; + if (ie > ix) { + ix = ie; + } + } +#pragma omp atomic compare capture release + { + iv = ix; + if (ix > ie) { + ix = ie; + } + } +#pragma omp atomic compare capture release + { + iv = ix; + if (ie < ix) { + ix = ie; + } + } +#pragma omp atomic compare capture release + { + iv = ix; + if (ix < ie) { + ix = ie; + } + } +#pragma omp atomic compare capture release + { + iv = ix; + if (ix == ie) { + ix = id; + } + } +#pragma omp atomic compare 
capture release + { + iv = ix; + if (ie == ix) { + ix = id; + } + } +#pragma omp atomic compare capture release + { + if (ie > ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture release + { + if (ix > ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture release + { + if (ie < ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture release + { + if (ix < ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture release + { + if (ix == ie) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture release + { + if (ie == ix) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture release + if (ix == ie) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture release + if (ie == ix) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture release + { + ir = ix == ie; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture release + { + ir = ie == ix; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture release + { + ir = ix == ie; + if (ir) { + ix = id; + } else { + iv = ix; + } + } +#pragma omp atomic compare capture release + { + ir = ie == ix; + if (ir) { + ix = id; + } else { + iv = ix; + } + } + +#pragma omp atomic compare capture seq_cst + { + iv = ix; + if (ie > ix) { + ix = ie; + } + } +#pragma omp atomic compare capture seq_cst + { + iv = ix; + if (ix > ie) { + ix = ie; + } + } +#pragma omp atomic compare capture seq_cst + { + iv = ix; + if (ie < ix) { + ix = ie; + } + } +#pragma omp atomic compare capture seq_cst + { + iv = ix; + if (ix < ie) { + ix = ie; + } + } +#pragma omp atomic compare capture seq_cst + { + iv = ix; + if (ix == ie) { + ix = id; + } + } +#pragma omp atomic compare capture seq_cst + { + iv = ix; + if (ie == ix) { + ix = id; + } + } +#pragma omp atomic compare capture seq_cst + { + if (ie > ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture seq_cst + { + if (ix > ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture seq_cst + { + if (ie < ix) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture seq_cst + { + if (ix < ie) { + ix = ie; + } + iv = ix; + } +#pragma omp atomic compare capture seq_cst + { + if (ix == ie) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture seq_cst + { + if (ie == ix) { + ix = id; + } + iv = ix; + } +#pragma omp atomic compare capture seq_cst + if (ix == ie) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture seq_cst + if (ie == ix) { + ix = id; + } else { + iv = ix; + } +#pragma omp atomic compare capture seq_cst + { + ir = ix == ie; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture seq_cst + { + ir = ie == ix; + if (ir) { + ix = id; + } + } +#pragma omp atomic compare capture seq_cst + { + ir = ix == ie; + if (ir) { + ix = id; + } else { + iv = ix; + } + } +#pragma omp atomic compare capture seq_cst + { + ir = ie == ix; + if (ir) { + ix = id; + } else { + iv = ix; + } + } + +#pragma omp atomic compare capture + { + uiv = uix; + if (uie > uix) { + uix = uie; + } + } +#pragma omp atomic compare capture + { + uiv = uix; + if (uix > uie) { + uix = uie; + } + } +#pragma omp atomic compare capture + { + uiv = uix; + if (uie < uix) { + uix = uie; + } + } +#pragma omp atomic compare capture + { + uiv = uix; + if (uix < uie) { + uix = uie; + } + } +#pragma omp atomic compare capture + { + uiv = uix; + if (uix == uie) { + uix = uid; + } + } +#pragma omp atomic compare capture + { + uiv = 
uix; + if (uie == uix) { + uix = uid; + } + } +#pragma omp atomic compare capture + { + if (uie > uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture + { + if (uix > uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture + { + if (uie < uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture + { + if (uix < uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture + { + if (uix == uie) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture + { + if (uie == uix) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture + if (uix == uie) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture + if (uie == uix) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture + { + uir = uix == uie; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture + { + uir = uie == uix; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture + { + uir = uix == uie; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } +#pragma omp atomic compare capture + { + uir = uie == uix; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } + +#pragma omp atomic compare capture acq_rel + { + uiv = uix; + if (uie > uix) { + uix = uie; + } + } +#pragma omp atomic compare capture acq_rel + { + uiv = uix; + if (uix > uie) { + uix = uie; + } + } +#pragma omp atomic compare capture acq_rel + { + uiv = uix; + if (uie < uix) { + uix = uie; + } + } +#pragma omp atomic compare capture acq_rel + { + uiv = uix; + if (uix < uie) { + uix = uie; + } + } +#pragma omp atomic compare capture acq_rel + { + uiv = uix; + if (uix == uie) { + uix = uid; + } + } +#pragma omp atomic compare capture acq_rel + { + uiv = uix; + if (uie == uix) { + uix = uid; + } + } +#pragma omp atomic compare capture acq_rel + { + if (uie > uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + { + if (uix > uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + { + if (uie < uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + { + if (uix < uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + { + if (uix == uie) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + { + if (uie == uix) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + if (uix == uie) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + if (uie == uix) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture acq_rel + { + uir = uix == uie; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture acq_rel + { + uir = uie == uix; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture acq_rel + { + uir = uix == uie; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } +#pragma omp atomic compare capture acq_rel + { + uir = uie == uix; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } + +#pragma omp atomic compare capture acquire + { + uiv = uix; + if (uie > uix) { + uix = uie; + } + } +#pragma omp atomic compare capture acquire + { + uiv = uix; + if (uix > uie) { + uix = uie; + } + } +#pragma omp atomic compare capture acquire + { + uiv = uix; + if (uie < uix) { + uix = uie; + } + } +#pragma omp atomic compare capture acquire + { + uiv = uix; + if (uix < uie) { + uix = uie; + } + } +#pragma omp atomic compare capture 
acquire + { + uiv = uix; + if (uix == uie) { + uix = uid; + } + } +#pragma omp atomic compare capture acquire + { + uiv = uix; + if (uie == uix) { + uix = uid; + } + } +#pragma omp atomic compare capture acquire + { + if (uie > uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acquire + { + if (uix > uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acquire + { + if (uie < uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acquire + { + if (uix < uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture acquire + { + if (uix == uie) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture acquire + { + if (uie == uix) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture acquire + if (uix == uie) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture acquire + if (uie == uix) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture acquire + { + uir = uix == uie; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture acquire + { + uir = uie == uix; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture acquire + { + uir = uix == uie; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } +#pragma omp atomic compare capture acquire + { + uir = uie == uix; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } + +#pragma omp atomic compare capture relaxed + { + uiv = uix; + if (uie > uix) { + uix = uie; + } + } +#pragma omp atomic compare capture relaxed + { + uiv = uix; + if (uix > uie) { + uix = uie; + } + } +#pragma omp atomic compare capture relaxed + { + uiv = uix; + if (uie < uix) { + uix = uie; + } + } +#pragma omp atomic compare capture relaxed + { + uiv = uix; + if (uix < uie) { + uix = uie; + } + } +#pragma omp atomic compare capture relaxed + { + uiv = uix; + if (uix == uie) { + uix = uid; + } + } +#pragma omp atomic compare capture relaxed + { + uiv = uix; + if (uie == uix) { + uix = uid; + } + } +#pragma omp atomic compare capture relaxed + { + if (uie > uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture relaxed + { + if (uix > uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture relaxed + { + if (uie < uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture relaxed + { + if (uix < uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture relaxed + { + if (uix == uie) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture relaxed + { + if (uie == uix) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture relaxed + if (uix == uie) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture relaxed + if (uie == uix) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture relaxed + { + uir = uix == uie; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture relaxed + { + uir = uie == uix; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture relaxed + { + uir = uix == uie; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } +#pragma omp atomic compare capture relaxed + { + uir = uie == uix; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } + +#pragma omp atomic compare capture release + { + uiv = uix; + if (uie > uix) { + uix = uie; + } + } +#pragma omp atomic compare capture release + { + uiv = uix; + if (uix > uie) { + uix = uie; + } + } +#pragma omp atomic 
compare capture release + { + uiv = uix; + if (uie < uix) { + uix = uie; + } + } +#pragma omp atomic compare capture release + { + uiv = uix; + if (uix < uie) { + uix = uie; + } + } +#pragma omp atomic compare capture release + { + uiv = uix; + if (uix == uie) { + uix = uid; + } + } +#pragma omp atomic compare capture release + { + uiv = uix; + if (uie == uix) { + uix = uid; + } + } +#pragma omp atomic compare capture release + { + if (uie > uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture release + { + if (uix > uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture release + { + if (uie < uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture release + { + if (uix < uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture release + { + if (uix == uie) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture release + { + if (uie == uix) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture release + if (uix == uie) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture release + if (uie == uix) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture release + { + uir = uix == uie; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture release + { + uir = uie == uix; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture release + { + uir = uix == uie; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } +#pragma omp atomic compare capture release + { + uir = uie == uix; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } + +#pragma omp atomic compare capture seq_cst + { + uiv = uix; + if (uie > uix) { + uix = uie; + } + } +#pragma omp atomic compare capture seq_cst + { + uiv = uix; + if (uix > uie) { + uix = uie; + } + } +#pragma omp atomic compare capture seq_cst + { + uiv = uix; + if (uie < uix) { + uix = uie; + } + } +#pragma omp atomic compare capture seq_cst + { + uiv = uix; + if (uix < uie) { + uix = uie; + } + } +#pragma omp atomic compare capture seq_cst + { + uiv = uix; + if (uix == uie) { + uix = uid; + } + } +#pragma omp atomic compare capture seq_cst + { + uiv = uix; + if (uie == uix) { + uix = uid; + } + } +#pragma omp atomic compare capture seq_cst + { + if (uie > uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + { + if (uix > uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + { + if (uie < uix) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + { + if (uix < uie) { + uix = uie; + } + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + { + if (uix == uie) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + { + if (uie == uix) { + uix = uid; + } + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + if (uix == uie) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + if (uie == uix) { + uix = uid; + } else { + uiv = uix; + } +#pragma omp atomic compare capture seq_cst + { + uir = uix == uie; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture seq_cst + { + uir = uie == uix; + if (uir) { + uix = uid; + } + } +#pragma omp atomic compare capture seq_cst + { + uir = uix == uie; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } +#pragma omp atomic compare capture seq_cst + { + uir = uie == uix; + if (uir) { + uix = uid; + } else { + uiv = uix; + } + } + +#pragma 
omp atomic compare capture + { + lv = lx; + if (le > lx) { + lx = le; + } + } +#pragma omp atomic compare capture + { + lv = lx; + if (lx > le) { + lx = le; + } + } +#pragma omp atomic compare capture + { + lv = lx; + if (le < lx) { + lx = le; + } + } +#pragma omp atomic compare capture + { + lv = lx; + if (lx < le) { + lx = le; + } + } +#pragma omp atomic compare capture + { + lv = lx; + if (lx == le) { + lx = ld; + } + } +#pragma omp atomic compare capture + { + lv = lx; + if (le == lx) { + lx = ld; + } + } +#pragma omp atomic compare capture + { + if (le > lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture + { + if (lx > le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture + { + if (le < lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture + { + if (lx < le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture + { + if (lx == le) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture + { + if (le == lx) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture + if (lx == le) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture + if (le == lx) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture + { + lr = lx == le; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture + { + lr = le == lx; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture + { + lr = lx == le; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } +#pragma omp atomic compare capture + { + lr = le == lx; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } + +#pragma omp atomic compare capture acq_rel + { + lv = lx; + if (le > lx) { + lx = le; + } + } +#pragma omp atomic compare capture acq_rel + { + lv = lx; + if (lx > le) { + lx = le; + } + } +#pragma omp atomic compare capture acq_rel + { + lv = lx; + if (le < lx) { + lx = le; + } + } +#pragma omp atomic compare capture acq_rel + { + lv = lx; + if (lx < le) { + lx = le; + } + } +#pragma omp atomic compare capture acq_rel + { + lv = lx; + if (lx == le) { + lx = ld; + } + } +#pragma omp atomic compare capture acq_rel + { + lv = lx; + if (le == lx) { + lx = ld; + } + } +#pragma omp atomic compare capture acq_rel + { + if (le > lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acq_rel + { + if (lx > le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acq_rel + { + if (le < lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acq_rel + { + if (lx < le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acq_rel + { + if (lx == le) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture acq_rel + { + if (le == lx) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture acq_rel + if (lx == le) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture acq_rel + if (le == lx) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture acq_rel + { + lr = lx == le; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture acq_rel + { + lr = le == lx; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture acq_rel + { + lr = lx == le; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } +#pragma omp atomic compare capture acq_rel + { + lr = le == lx; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } + +#pragma omp atomic compare capture acquire + { + lv = lx; + if (le > lx) { + lx = le; + } + } +#pragma omp atomic compare capture acquire + { + lv = lx; + if (lx 
> le) { + lx = le; + } + } +#pragma omp atomic compare capture acquire + { + lv = lx; + if (le < lx) { + lx = le; + } + } +#pragma omp atomic compare capture acquire + { + lv = lx; + if (lx < le) { + lx = le; + } + } +#pragma omp atomic compare capture acquire + { + lv = lx; + if (lx == le) { + lx = ld; + } + } +#pragma omp atomic compare capture acquire + { + lv = lx; + if (le == lx) { + lx = ld; + } + } +#pragma omp atomic compare capture acquire + { + if (le > lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acquire + { + if (lx > le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acquire + { + if (le < lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acquire + { + if (lx < le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture acquire + { + if (lx == le) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture acquire + { + if (le == lx) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture acquire + if (lx == le) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture acquire + if (le == lx) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture acquire + { + lr = lx == le; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture acquire + { + lr = le == lx; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture acquire + { + lr = lx == le; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } +#pragma omp atomic compare capture acquire + { + lr = le == lx; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } + +#pragma omp atomic compare capture relaxed + { + lv = lx; + if (le > lx) { + lx = le; + } + } +#pragma omp atomic compare capture relaxed + { + lv = lx; + if (lx > le) { + lx = le; + } + } +#pragma omp atomic compare capture relaxed + { + lv = lx; + if (le < lx) { + lx = le; + } + } +#pragma omp atomic compare capture relaxed + { + lv = lx; + if (lx < le) { + lx = le; + } + } +#pragma omp atomic compare capture relaxed + { + lv = lx; + if (lx == le) { + lx = ld; + } + } +#pragma omp atomic compare capture relaxed + { + lv = lx; + if (le == lx) { + lx = ld; + } + } +#pragma omp atomic compare capture relaxed + { + if (le > lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture relaxed + { + if (lx > le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture relaxed + { + if (le < lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture relaxed + { + if (lx < le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture relaxed + { + if (lx == le) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture relaxed + { + if (le == lx) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture relaxed + if (lx == le) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture relaxed + if (le == lx) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture relaxed + { + lr = lx == le; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture relaxed + { + lr = le == lx; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture relaxed + { + lr = lx == le; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } +#pragma omp atomic compare capture relaxed + { + lr = le == lx; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } + +#pragma omp atomic compare capture release + { + lv = lx; + if (le > lx) { + lx = le; + } + } +#pragma omp atomic compare capture release + { + lv = lx; + if (lx > le) { + 
lx = le; + } + } +#pragma omp atomic compare capture release + { + lv = lx; + if (le < lx) { + lx = le; + } + } +#pragma omp atomic compare capture release + { + lv = lx; + if (lx < le) { + lx = le; + } + } +#pragma omp atomic compare capture release + { + lv = lx; + if (lx == le) { + lx = ld; + } + } +#pragma omp atomic compare capture release + { + lv = lx; + if (le == lx) { + lx = ld; + } + } +#pragma omp atomic compare capture release + { + if (le > lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture release + { + if (lx > le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture release + { + if (le < lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture release + { + if (lx < le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture release + { + if (lx == le) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture release + { + if (le == lx) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture release + if (lx == le) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture release + if (le == lx) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture release + { + lr = lx == le; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture release + { + lr = le == lx; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture release + { + lr = lx == le; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } +#pragma omp atomic compare capture release + { + lr = le == lx; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } + +#pragma omp atomic compare capture seq_cst + { + lv = lx; + if (le > lx) { + lx = le; + } + } +#pragma omp atomic compare capture seq_cst + { + lv = lx; + if (lx > le) { + lx = le; + } + } +#pragma omp atomic compare capture seq_cst + { + lv = lx; + if (le < lx) { + lx = le; + } + } +#pragma omp atomic compare capture seq_cst + { + lv = lx; + if (lx < le) { + lx = le; + } + } +#pragma omp atomic compare capture seq_cst + { + lv = lx; + if (lx == le) { + lx = ld; + } + } +#pragma omp atomic compare capture seq_cst + { + lv = lx; + if (le == lx) { + lx = ld; + } + } +#pragma omp atomic compare capture seq_cst + { + if (le > lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture seq_cst + { + if (lx > le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture seq_cst + { + if (le < lx) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture seq_cst + { + if (lx < le) { + lx = le; + } + lv = lx; + } +#pragma omp atomic compare capture seq_cst + { + if (lx == le) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture seq_cst + { + if (le == lx) { + lx = ld; + } + lv = lx; + } +#pragma omp atomic compare capture seq_cst + if (lx == le) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture seq_cst + if (le == lx) { + lx = ld; + } else { + lv = lx; + } +#pragma omp atomic compare capture seq_cst + { + lr = lx == le; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture seq_cst + { + lr = le == lx; + if (lr) { + lx = ld; + } + } +#pragma omp atomic compare capture seq_cst + { + lr = lx == le; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } +#pragma omp atomic compare capture seq_cst + { + lr = le == lx; + if (lr) { + lx = ld; + } else { + lv = lx; + } + } + +#pragma omp atomic compare capture + { + ulv = ulx; + if (ule > ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture + { + ulv = ulx; + if (ulx > ule) { + ulx = ule; + } + 
} +#pragma omp atomic compare capture + { + ulv = ulx; + if (ule < ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture + { + ulv = ulx; + if (ulx < ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture + { + ulv = ulx; + if (ulx == ule) { + ulx = uld; + } + } +#pragma omp atomic compare capture + { + ulv = ulx; + if (ule == ulx) { + ulx = uld; + } + } +#pragma omp atomic compare capture + { + if (ule > ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture + { + if (ulx > ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture + { + if (ule < ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture + { + if (ulx < ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture + { + if (ulx == ule) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture + { + if (ule == ulx) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture + if (ulx == ule) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture + if (ule == ulx) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } +#pragma omp atomic compare capture + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } + +#pragma omp atomic compare capture acq_rel + { + ulv = ulx; + if (ule > ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture acq_rel + { + ulv = ulx; + if (ulx > ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture acq_rel + { + ulv = ulx; + if (ule < ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture acq_rel + { + ulv = ulx; + if (ulx < ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture acq_rel + { + ulv = ulx; + if (ulx == ule) { + ulx = uld; + } + } +#pragma omp atomic compare capture acq_rel + { + ulv = ulx; + if (ule == ulx) { + ulx = uld; + } + } +#pragma omp atomic compare capture acq_rel + { + if (ule > ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + { + if (ulx > ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + { + if (ule < ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + { + if (ulx < ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + { + if (ulx == ule) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + { + if (ule == ulx) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + if (ulx == ule) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + if (ule == ulx) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture acq_rel + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture acq_rel + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture acq_rel + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } +#pragma omp atomic compare capture acq_rel + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } + +#pragma omp atomic compare capture acquire + { + ulv = ulx; + if (ule > ulx) { + ulx = ule; + } + } +#pragma omp 
atomic compare capture acquire + { + ulv = ulx; + if (ulx > ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture acquire + { + ulv = ulx; + if (ule < ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture acquire + { + ulv = ulx; + if (ulx < ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture acquire + { + ulv = ulx; + if (ulx == ule) { + ulx = uld; + } + } +#pragma omp atomic compare capture acquire + { + ulv = ulx; + if (ule == ulx) { + ulx = uld; + } + } +#pragma omp atomic compare capture acquire + { + if (ule > ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acquire + { + if (ulx > ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acquire + { + if (ule < ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acquire + { + if (ulx < ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture acquire + { + if (ulx == ule) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture acquire + { + if (ule == ulx) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture acquire + if (ulx == ule) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture acquire + if (ule == ulx) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture acquire + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture acquire + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture acquire + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } +#pragma omp atomic compare capture acquire + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } + +#pragma omp atomic compare capture relaxed + { + ulv = ulx; + if (ule > ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture relaxed + { + ulv = ulx; + if (ulx > ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture relaxed + { + ulv = ulx; + if (ule < ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture relaxed + { + ulv = ulx; + if (ulx < ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture relaxed + { + ulv = ulx; + if (ulx == ule) { + ulx = uld; + } + } +#pragma omp atomic compare capture relaxed + { + ulv = ulx; + if (ule == ulx) { + ulx = uld; + } + } +#pragma omp atomic compare capture relaxed + { + if (ule > ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + { + if (ulx > ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + { + if (ule < ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + { + if (ulx < ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + { + if (ulx == ule) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + { + if (ule == ulx) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + if (ulx == ule) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + if (ule == ulx) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture relaxed + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture relaxed + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture relaxed + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } +#pragma omp atomic compare 
capture relaxed + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } + +#pragma omp atomic compare capture release + { + ulv = ulx; + if (ule > ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture release + { + ulv = ulx; + if (ulx > ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture release + { + ulv = ulx; + if (ule < ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture release + { + ulv = ulx; + if (ulx < ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture release + { + ulv = ulx; + if (ulx == ule) { + ulx = uld; + } + } +#pragma omp atomic compare capture release + { + ulv = ulx; + if (ule == ulx) { + ulx = uld; + } + } +#pragma omp atomic compare capture release + { + if (ule > ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture release + { + if (ulx > ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture release + { + if (ule < ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture release + { + if (ulx < ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture release + { + if (ulx == ule) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture release + { + if (ule == ulx) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture release + if (ulx == ule) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture release + if (ule == ulx) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture release + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture release + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture release + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } +#pragma omp atomic compare capture release + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } + +#pragma omp atomic compare capture seq_cst + { + ulv = ulx; + if (ule > ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture seq_cst + { + ulv = ulx; + if (ulx > ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture seq_cst + { + ulv = ulx; + if (ule < ulx) { + ulx = ule; + } + } +#pragma omp atomic compare capture seq_cst + { + ulv = ulx; + if (ulx < ule) { + ulx = ule; + } + } +#pragma omp atomic compare capture seq_cst + { + ulv = ulx; + if (ulx == ule) { + ulx = uld; + } + } +#pragma omp atomic compare capture seq_cst + { + ulv = ulx; + if (ule == ulx) { + ulx = uld; + } + } +#pragma omp atomic compare capture seq_cst + { + if (ule > ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + { + if (ulx > ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + { + if (ule < ulx) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + { + if (ulx < ule) { + ulx = ule; + } + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + { + if (ulx == ule) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + { + if (ule == ulx) { + ulx = uld; + } + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + if (ulx == ule) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + if (ule == ulx) { + ulx = uld; + } else { + ulv = ulx; + } +#pragma omp atomic compare capture seq_cst + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture 
seq_cst + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } + } +#pragma omp atomic compare capture seq_cst + { + ulr = ulx == ule; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } +#pragma omp atomic compare capture seq_cst + { + ulr = ule == ulx; + if (ulr) { + ulx = uld; + } else { + ulv = ulx; + } + } + +#pragma omp atomic compare capture + { + llv = llx; + if (lle > llx) { + llx = lle; + } + } +#pragma omp atomic compare capture + { + llv = llx; + if (llx > lle) { + llx = lle; + } + } +#pragma omp atomic compare capture + { + llv = llx; + if (lle < llx) { + llx = lle; + } + } +#pragma omp atomic compare capture + { + llv = llx; + if (llx < lle) { + llx = lle; + } + } +#pragma omp atomic compare capture + { + llv = llx; + if (llx == lle) { + llx = lld; + } + } +#pragma omp atomic compare capture + { + llv = llx; + if (lle == llx) { + llx = lld; + } + } +#pragma omp atomic compare capture + { + if (lle > llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture + { + if (llx > lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture + { + if (lle < llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture + { + if (llx < lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture + { + if (llx == lle) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture + { + if (lle == llx) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture + if (llx == lle) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture + if (lle == llx) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture + { + llr = llx == lle; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture + { + llr = lle == llx; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture + { + llr = llx == lle; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } +#pragma omp atomic compare capture + { + llr = lle == llx; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } + +#pragma omp atomic compare capture acq_rel + { + llv = llx; + if (lle > llx) { + llx = lle; + } + } +#pragma omp atomic compare capture acq_rel + { + llv = llx; + if (llx > lle) { + llx = lle; + } + } +#pragma omp atomic compare capture acq_rel + { + llv = llx; + if (lle < llx) { + llx = lle; + } + } +#pragma omp atomic compare capture acq_rel + { + llv = llx; + if (llx < lle) { + llx = lle; + } + } +#pragma omp atomic compare capture acq_rel + { + llv = llx; + if (llx == lle) { + llx = lld; + } + } +#pragma omp atomic compare capture acq_rel + { + llv = llx; + if (lle == llx) { + llx = lld; + } + } +#pragma omp atomic compare capture acq_rel + { + if (lle > llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acq_rel + { + if (llx > lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acq_rel + { + if (lle < llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acq_rel + { + if (llx < lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acq_rel + { + if (llx == lle) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture acq_rel + { + if (lle == llx) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture acq_rel + if (llx == lle) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture acq_rel + if (lle == llx) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture acq_rel + { + llr = llx == lle; 
+ if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture acq_rel + { + llr = lle == llx; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture acq_rel + { + llr = llx == lle; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } +#pragma omp atomic compare capture acq_rel + { + llr = lle == llx; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } + +#pragma omp atomic compare capture acquire + { + llv = llx; + if (lle > llx) { + llx = lle; + } + } +#pragma omp atomic compare capture acquire + { + llv = llx; + if (llx > lle) { + llx = lle; + } + } +#pragma omp atomic compare capture acquire + { + llv = llx; + if (lle < llx) { + llx = lle; + } + } +#pragma omp atomic compare capture acquire + { + llv = llx; + if (llx < lle) { + llx = lle; + } + } +#pragma omp atomic compare capture acquire + { + llv = llx; + if (llx == lle) { + llx = lld; + } + } +#pragma omp atomic compare capture acquire + { + llv = llx; + if (lle == llx) { + llx = lld; + } + } +#pragma omp atomic compare capture acquire + { + if (lle > llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acquire + { + if (llx > lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acquire + { + if (lle < llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acquire + { + if (llx < lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture acquire + { + if (llx == lle) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture acquire + { + if (lle == llx) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture acquire + if (llx == lle) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture acquire + if (lle == llx) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture acquire + { + llr = llx == lle; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture acquire + { + llr = lle == llx; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture acquire + { + llr = llx == lle; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } +#pragma omp atomic compare capture acquire + { + llr = lle == llx; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } + +#pragma omp atomic compare capture relaxed + { + llv = llx; + if (lle > llx) { + llx = lle; + } + } +#pragma omp atomic compare capture relaxed + { + llv = llx; + if (llx > lle) { + llx = lle; + } + } +#pragma omp atomic compare capture relaxed + { + llv = llx; + if (lle < llx) { + llx = lle; + } + } +#pragma omp atomic compare capture relaxed + { + llv = llx; + if (llx < lle) { + llx = lle; + } + } +#pragma omp atomic compare capture relaxed + { + llv = llx; + if (llx == lle) { + llx = lld; + } + } +#pragma omp atomic compare capture relaxed + { + llv = llx; + if (lle == llx) { + llx = lld; + } + } +#pragma omp atomic compare capture relaxed + { + if (lle > llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture relaxed + { + if (llx > lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture relaxed + { + if (lle < llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture relaxed + { + if (llx < lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture relaxed + { + if (llx == lle) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture relaxed + { + if (lle == llx) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture relaxed + if (llx == lle) { 
+ llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture relaxed + if (lle == llx) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture relaxed + { + llr = llx == lle; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture relaxed + { + llr = lle == llx; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture relaxed + { + llr = llx == lle; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } +#pragma omp atomic compare capture relaxed + { + llr = lle == llx; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } + +#pragma omp atomic compare capture release + { + llv = llx; + if (lle > llx) { + llx = lle; + } + } +#pragma omp atomic compare capture release + { + llv = llx; + if (llx > lle) { + llx = lle; + } + } +#pragma omp atomic compare capture release + { + llv = llx; + if (lle < llx) { + llx = lle; + } + } +#pragma omp atomic compare capture release + { + llv = llx; + if (llx < lle) { + llx = lle; + } + } +#pragma omp atomic compare capture release + { + llv = llx; + if (llx == lle) { + llx = lld; + } + } +#pragma omp atomic compare capture release + { + llv = llx; + if (lle == llx) { + llx = lld; + } + } +#pragma omp atomic compare capture release + { + if (lle > llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture release + { + if (llx > lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture release + { + if (lle < llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture release + { + if (llx < lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture release + { + if (llx == lle) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture release + { + if (lle == llx) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture release + if (llx == lle) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture release + if (lle == llx) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture release + { + llr = llx == lle; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture release + { + llr = lle == llx; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture release + { + llr = llx == lle; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } +#pragma omp atomic compare capture release + { + llr = lle == llx; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } + +#pragma omp atomic compare capture seq_cst + { + llv = llx; + if (lle > llx) { + llx = lle; + } + } +#pragma omp atomic compare capture seq_cst + { + llv = llx; + if (llx > lle) { + llx = lle; + } + } +#pragma omp atomic compare capture seq_cst + { + llv = llx; + if (lle < llx) { + llx = lle; + } + } +#pragma omp atomic compare capture seq_cst + { + llv = llx; + if (llx < lle) { + llx = lle; + } + } +#pragma omp atomic compare capture seq_cst + { + llv = llx; + if (llx == lle) { + llx = lld; + } + } +#pragma omp atomic compare capture seq_cst + { + llv = llx; + if (lle == llx) { + llx = lld; + } + } +#pragma omp atomic compare capture seq_cst + { + if (lle > llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture seq_cst + { + if (llx > lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture seq_cst + { + if (lle < llx) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture seq_cst + { + if (llx < lle) { + llx = lle; + } + llv = llx; + } +#pragma omp atomic compare capture seq_cst + { + if 
(llx == lle) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture seq_cst + { + if (lle == llx) { + llx = lld; + } + llv = llx; + } +#pragma omp atomic compare capture seq_cst + if (llx == lle) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture seq_cst + if (lle == llx) { + llx = lld; + } else { + llv = llx; + } +#pragma omp atomic compare capture seq_cst + { + llr = llx == lle; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture seq_cst + { + llr = lle == llx; + if (llr) { + llx = lld; + } + } +#pragma omp atomic compare capture seq_cst + { + llr = llx == lle; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } +#pragma omp atomic compare capture seq_cst + { + llr = lle == llx; + if (llr) { + llx = lld; + } else { + llv = llx; + } + } + +#pragma omp atomic compare capture + { + ullv = ullx; + if (ulle > ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture + { + ullv = ullx; + if (ullx > ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture + { + ullv = ullx; + if (ulle < ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture + { + ullv = ullx; + if (ullx < ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture + { + ullv = ullx; + if (ullx == ulle) { + ullx = ulld; + } + } +#pragma omp atomic compare capture + { + ullv = ullx; + if (ulle == ullx) { + ullx = ulld; + } + } +#pragma omp atomic compare capture + { + if (ulle > ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture + { + if (ullx > ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture + { + if (ulle < ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture + { + if (ullx < ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture + { + if (ullx == ulle) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture + { + if (ulle == ullx) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture + if (ullx == ulle) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture + if (ulle == ullx) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } +#pragma omp atomic compare capture + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } + +#pragma omp atomic compare capture acq_rel + { + ullv = ullx; + if (ulle > ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acq_rel + { + ullv = ullx; + if (ullx > ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acq_rel + { + ullv = ullx; + if (ulle < ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acq_rel + { + ullv = ullx; + if (ullx < ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acq_rel + { + ullv = ullx; + if (ullx == ulle) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acq_rel + { + ullv = ullx; + if (ulle == ullx) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acq_rel + { + if (ulle > ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture acq_rel + { + if (ullx > ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic 
compare capture acq_rel + { + if (ulle < ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture acq_rel + { + if (ullx < ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture acq_rel + { + if (ullx == ulle) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture acq_rel + { + if (ulle == ullx) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture acq_rel + if (ullx == ulle) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture acq_rel + if (ulle == ullx) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture acq_rel + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acq_rel + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acq_rel + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } +#pragma omp atomic compare capture acq_rel + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } + +#pragma omp atomic compare capture acquire + { + ullv = ullx; + if (ulle > ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acquire + { + ullv = ullx; + if (ullx > ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acquire + { + ullv = ullx; + if (ulle < ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acquire + { + ullv = ullx; + if (ullx < ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture acquire + { + ullv = ullx; + if (ullx == ulle) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acquire + { + ullv = ullx; + if (ulle == ullx) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acquire + { + if (ulle > ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture acquire + { + if (ullx > ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture acquire + { + if (ulle < ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture acquire + { + if (ullx < ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture acquire + { + if (ullx == ulle) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture acquire + { + if (ulle == ullx) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture acquire + if (ullx == ulle) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture acquire + if (ulle == ullx) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture acquire + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acquire + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture acquire + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } +#pragma omp atomic compare capture acquire + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } + +#pragma omp atomic compare capture relaxed + { + ullv = ullx; + if (ulle > ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture relaxed + { + ullv = ullx; + if (ullx > ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture relaxed + { + ullv = ullx; + if (ulle < ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture relaxed + { + ullv = ullx; + if (ullx < ulle) { + ullx = ulle; + } + } +#pragma omp 
atomic compare capture relaxed + { + ullv = ullx; + if (ullx == ulle) { + ullx = ulld; + } + } +#pragma omp atomic compare capture relaxed + { + ullv = ullx; + if (ulle == ullx) { + ullx = ulld; + } + } +#pragma omp atomic compare capture relaxed + { + if (ulle > ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + { + if (ullx > ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + { + if (ulle < ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + { + if (ullx < ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + { + if (ullx == ulle) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + { + if (ulle == ullx) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + if (ullx == ulle) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + if (ulle == ullx) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture relaxed + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture relaxed + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture relaxed + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } +#pragma omp atomic compare capture relaxed + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } + +#pragma omp atomic compare capture release + { + ullv = ullx; + if (ulle > ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture release + { + ullv = ullx; + if (ullx > ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture release + { + ullv = ullx; + if (ulle < ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture release + { + ullv = ullx; + if (ullx < ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture release + { + ullv = ullx; + if (ullx == ulle) { + ullx = ulld; + } + } +#pragma omp atomic compare capture release + { + ullv = ullx; + if (ulle == ullx) { + ullx = ulld; + } + } +#pragma omp atomic compare capture release + { + if (ulle > ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture release + { + if (ullx > ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture release + { + if (ulle < ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture release + { + if (ullx < ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture release + { + if (ullx == ulle) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture release + { + if (ulle == ullx) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture release + if (ullx == ulle) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture release + if (ulle == ullx) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture release + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture release + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture release + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } +#pragma omp atomic compare capture release + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } + 
+#pragma omp atomic compare capture seq_cst + { + ullv = ullx; + if (ulle > ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture seq_cst + { + ullv = ullx; + if (ullx > ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture seq_cst + { + ullv = ullx; + if (ulle < ullx) { + ullx = ulle; + } + } +#pragma omp atomic compare capture seq_cst + { + ullv = ullx; + if (ullx < ulle) { + ullx = ulle; + } + } +#pragma omp atomic compare capture seq_cst + { + ullv = ullx; + if (ullx == ulle) { + ullx = ulld; + } + } +#pragma omp atomic compare capture seq_cst + { + ullv = ullx; + if (ulle == ullx) { + ullx = ulld; + } + } +#pragma omp atomic compare capture seq_cst + { + if (ulle > ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + { + if (ullx > ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + { + if (ulle < ullx) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + { + if (ullx < ulle) { + ullx = ulle; + } + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + { + if (ullx == ulle) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + { + if (ulle == ullx) { + ullx = ulld; + } + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + if (ullx == ulle) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + if (ulle == ullx) { + ullx = ulld; + } else { + ullv = ullx; + } +#pragma omp atomic compare capture seq_cst + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture seq_cst + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } + } +#pragma omp atomic compare capture seq_cst + { + ullr = ullx == ulle; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } +#pragma omp atomic compare capture seq_cst + { + ullr = ulle == ullx; + if (ullr) { + ullx = ulld; + } else { + ullv = ullx; + } + } +} + #endif // CHECK-LABEL: @foo( // CHECK-NEXT: entry: @@ -4019,3 +11772,7439 @@ // CHECK-NEXT: [[TMP1679:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP1677]], i64 [[TMP1678]] seq_cst seq_cst, align 8 // CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) // CHECK-NEXT: ret void +// +// +// CHECK-LABEL: define {{[^@]+}}@bar +// CHECK-SAME: () #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[CX:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[CV:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[CR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[CE:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[CD:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCX:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCV:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCE:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[UCD:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[SX:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[SV:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[SR:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[SE:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[SD:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USX:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USV:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USR:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USE:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[USD:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[IX:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[IV:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[IR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[IE:%.*]] = alloca i32, 
align 4 +// CHECK-NEXT: [[ID:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UIX:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UIV:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UIR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UIE:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[UID:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[LX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LV:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LR:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULV:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULR:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LLX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LLV:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LLR:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LLE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[LLD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLX:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLV:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLR:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLE:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[ULLD:%.*]] = alloca i64, align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP0]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP1]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP2]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP3]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP4:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP4]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP5]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP6:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP6]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP7]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP9:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP10:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP8]], i8 [[TMP9]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP11:%.*]] = extractvalue { i8, i1 } [[TMP10]], 0 +// CHECK-NEXT: store volatile i8 [[TMP11]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP12:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP13:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP14:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP12]], i8 [[TMP13]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP15:%.*]] = extractvalue { i8, i1 } [[TMP14]], 0 +// CHECK-NEXT: store volatile i8 [[TMP15]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP16:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP17:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP16]] monotonic, align 1 +// CHECK-NEXT: [[TMP18:%.*]] = icmp ugt i8 [[TMP17]], [[TMP16]] +// CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i8 [[TMP16]], i8 [[TMP17]] +// CHECK-NEXT: store volatile i8 [[TMP19]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP20:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP21:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP20]] monotonic, align 1 +// CHECK-NEXT: [[TMP22:%.*]] = icmp ult i8 [[TMP21]], [[TMP20]] +// CHECK-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i8 [[TMP20]], i8 [[TMP21]] +// 
CHECK-NEXT: store volatile i8 [[TMP23]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP24:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP25:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP24]] monotonic, align 1 +// CHECK-NEXT: [[TMP26:%.*]] = icmp ult i8 [[TMP25]], [[TMP24]] +// CHECK-NEXT: [[TMP27:%.*]] = select i1 [[TMP26]], i8 [[TMP24]], i8 [[TMP25]] +// CHECK-NEXT: store volatile i8 [[TMP27]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP28:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP29:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP28]] monotonic, align 1 +// CHECK-NEXT: [[TMP30:%.*]] = icmp ugt i8 [[TMP29]], [[TMP28]] +// CHECK-NEXT: [[TMP31:%.*]] = select i1 [[TMP30]], i8 [[TMP28]], i8 [[TMP29]] +// CHECK-NEXT: store volatile i8 [[TMP31]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP32:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP33:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP34:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP32]], i8 [[TMP33]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP35:%.*]] = extractvalue { i8, i1 } [[TMP34]], 0 +// CHECK-NEXT: [[TMP36:%.*]] = extractvalue { i8, i1 } [[TMP34]], 1 +// CHECK-NEXT: [[TMP37:%.*]] = select i1 [[TMP36]], i8 [[TMP32]], i8 [[TMP35]] +// CHECK-NEXT: store volatile i8 [[TMP37]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP38:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP39:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP40:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP38]], i8 [[TMP39]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP41:%.*]] = extractvalue { i8, i1 } [[TMP40]], 0 +// CHECK-NEXT: [[TMP42:%.*]] = extractvalue { i8, i1 } [[TMP40]], 1 +// CHECK-NEXT: [[TMP43:%.*]] = select i1 [[TMP42]], i8 [[TMP38]], i8 [[TMP41]] +// CHECK-NEXT: store volatile i8 [[TMP43]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP44:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP45:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP46:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP44]], i8 [[TMP45]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP47:%.*]] = extractvalue { i8, i1 } [[TMP46]], 0 +// CHECK-NEXT: [[TMP48:%.*]] = extractvalue { i8, i1 } [[TMP46]], 1 +// CHECK-NEXT: br i1 [[TMP48]], label [[CX_ATOMIC_EXIT:%.*]], label [[CX_ATOMIC_CONT:%.*]] +// CHECK: cx.atomic.cont: +// CHECK-NEXT: store i8 [[TMP47]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT]] +// CHECK: cx.atomic.exit: +// CHECK-NEXT: [[TMP49:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP50:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP51:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP49]], i8 [[TMP50]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP52:%.*]] = extractvalue { i8, i1 } [[TMP51]], 0 +// CHECK-NEXT: [[TMP53:%.*]] = extractvalue { i8, i1 } [[TMP51]], 1 +// CHECK-NEXT: br i1 [[TMP53]], label [[CX_ATOMIC_EXIT1:%.*]], label [[CX_ATOMIC_CONT2:%.*]] +// CHECK: cx.atomic.cont2: +// CHECK-NEXT: store i8 [[TMP52]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT1]] +// CHECK: cx.atomic.exit1: +// CHECK-NEXT: [[TMP54:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP55:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP56:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP54]], i8 [[TMP55]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP57:%.*]] = extractvalue { i8, i1 } [[TMP56]], 1 +// CHECK-NEXT: [[TMP58:%.*]] = zext i1 [[TMP57]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP58]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP59:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP60:%.*]] 
= load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP61:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP59]], i8 [[TMP60]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP62:%.*]] = extractvalue { i8, i1 } [[TMP61]], 1 +// CHECK-NEXT: [[TMP63:%.*]] = zext i1 [[TMP62]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP63]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP64:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP65:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP66:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP64]], i8 [[TMP65]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP67:%.*]] = extractvalue { i8, i1 } [[TMP66]], 0 +// CHECK-NEXT: [[TMP68:%.*]] = extractvalue { i8, i1 } [[TMP66]], 1 +// CHECK-NEXT: br i1 [[TMP68]], label [[CX_ATOMIC_EXIT3:%.*]], label [[CX_ATOMIC_CONT4:%.*]] +// CHECK: cx.atomic.cont4: +// CHECK-NEXT: store i8 [[TMP67]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT3]] +// CHECK: cx.atomic.exit3: +// CHECK-NEXT: [[TMP69:%.*]] = extractvalue { i8, i1 } [[TMP66]], 1 +// CHECK-NEXT: [[TMP70:%.*]] = zext i1 [[TMP69]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP70]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP71:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP72:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP73:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP71]], i8 [[TMP72]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP74:%.*]] = extractvalue { i8, i1 } [[TMP73]], 0 +// CHECK-NEXT: [[TMP75:%.*]] = extractvalue { i8, i1 } [[TMP73]], 1 +// CHECK-NEXT: br i1 [[TMP75]], label [[CX_ATOMIC_EXIT5:%.*]], label [[CX_ATOMIC_CONT6:%.*]] +// CHECK: cx.atomic.cont6: +// CHECK-NEXT: store i8 [[TMP74]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT5]] +// CHECK: cx.atomic.exit5: +// CHECK-NEXT: [[TMP76:%.*]] = extractvalue { i8, i1 } [[TMP73]], 1 +// CHECK-NEXT: [[TMP77:%.*]] = zext i1 [[TMP76]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP77]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP78:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP79:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP78]] acq_rel, align 1 +// CHECK-NEXT: store volatile i8 [[TMP79]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP80:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP81:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP80]] acq_rel, align 1 +// CHECK-NEXT: store volatile i8 [[TMP81]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP82:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP83:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP82]] acq_rel, align 1 +// CHECK-NEXT: store volatile i8 [[TMP83]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP84:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP85:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP84]] acq_rel, align 1 +// CHECK-NEXT: store volatile i8 [[TMP85]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP86:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP87:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP88:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP86]], i8 [[TMP87]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP89:%.*]] = extractvalue { i8, i1 } [[TMP88]], 0 +// CHECK-NEXT: store volatile i8 [[TMP89]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP90:%.*]] = load i8, 
i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP91:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP92:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP90]], i8 [[TMP91]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP93:%.*]] = extractvalue { i8, i1 } [[TMP92]], 0 +// CHECK-NEXT: store volatile i8 [[TMP93]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP94:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP95:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP94]] acq_rel, align 1 +// CHECK-NEXT: [[TMP96:%.*]] = icmp ugt i8 [[TMP95]], [[TMP94]] +// CHECK-NEXT: [[TMP97:%.*]] = select i1 [[TMP96]], i8 [[TMP94]], i8 [[TMP95]] +// CHECK-NEXT: store volatile i8 [[TMP97]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP98:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP99:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP98]] acq_rel, align 1 +// CHECK-NEXT: [[TMP100:%.*]] = icmp ult i8 [[TMP99]], [[TMP98]] +// CHECK-NEXT: [[TMP101:%.*]] = select i1 [[TMP100]], i8 [[TMP98]], i8 [[TMP99]] +// CHECK-NEXT: store volatile i8 [[TMP101]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP102:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP103:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP102]] acq_rel, align 1 +// CHECK-NEXT: [[TMP104:%.*]] = icmp ult i8 [[TMP103]], [[TMP102]] +// CHECK-NEXT: [[TMP105:%.*]] = select i1 [[TMP104]], i8 [[TMP102]], i8 [[TMP103]] +// CHECK-NEXT: store volatile i8 [[TMP105]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP106:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP107:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP106]] acq_rel, align 1 +// CHECK-NEXT: [[TMP108:%.*]] = icmp ugt i8 [[TMP107]], [[TMP106]] +// CHECK-NEXT: [[TMP109:%.*]] = select i1 [[TMP108]], i8 [[TMP106]], i8 [[TMP107]] +// CHECK-NEXT: store volatile i8 [[TMP109]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP110:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP111:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP112:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP110]], i8 [[TMP111]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP113:%.*]] = extractvalue { i8, i1 } [[TMP112]], 0 +// CHECK-NEXT: [[TMP114:%.*]] = extractvalue { i8, i1 } [[TMP112]], 1 +// CHECK-NEXT: [[TMP115:%.*]] = select i1 [[TMP114]], i8 [[TMP110]], i8 [[TMP113]] +// CHECK-NEXT: store volatile i8 [[TMP115]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP116:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP117:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP118:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP116]], i8 [[TMP117]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP119:%.*]] = extractvalue { i8, i1 } [[TMP118]], 0 +// CHECK-NEXT: [[TMP120:%.*]] = extractvalue { i8, i1 } [[TMP118]], 1 +// CHECK-NEXT: [[TMP121:%.*]] = select i1 [[TMP120]], i8 [[TMP116]], i8 [[TMP119]] +// CHECK-NEXT: store volatile i8 [[TMP121]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP122:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP123:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP124:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP122]], i8 [[TMP123]] acq_rel acquire, align 1 +// 
CHECK-NEXT: [[TMP125:%.*]] = extractvalue { i8, i1 } [[TMP124]], 0 +// CHECK-NEXT: [[TMP126:%.*]] = extractvalue { i8, i1 } [[TMP124]], 1 +// CHECK-NEXT: br i1 [[TMP126]], label [[CX_ATOMIC_EXIT7:%.*]], label [[CX_ATOMIC_CONT8:%.*]] +// CHECK: cx.atomic.cont8: +// CHECK-NEXT: store i8 [[TMP125]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT7]] +// CHECK: cx.atomic.exit7: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP127:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP128:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP129:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP127]], i8 [[TMP128]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP130:%.*]] = extractvalue { i8, i1 } [[TMP129]], 0 +// CHECK-NEXT: [[TMP131:%.*]] = extractvalue { i8, i1 } [[TMP129]], 1 +// CHECK-NEXT: br i1 [[TMP131]], label [[CX_ATOMIC_EXIT9:%.*]], label [[CX_ATOMIC_CONT10:%.*]] +// CHECK: cx.atomic.cont10: +// CHECK-NEXT: store i8 [[TMP130]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT9]] +// CHECK: cx.atomic.exit9: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP132:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP133:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP134:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP132]], i8 [[TMP133]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP135:%.*]] = extractvalue { i8, i1 } [[TMP134]], 1 +// CHECK-NEXT: [[TMP136:%.*]] = zext i1 [[TMP135]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP136]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP137:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP138:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP139:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP137]], i8 [[TMP138]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP140:%.*]] = extractvalue { i8, i1 } [[TMP139]], 1 +// CHECK-NEXT: [[TMP141:%.*]] = zext i1 [[TMP140]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP141]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP142:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP143:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP144:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP142]], i8 [[TMP143]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP145:%.*]] = extractvalue { i8, i1 } [[TMP144]], 0 +// CHECK-NEXT: [[TMP146:%.*]] = extractvalue { i8, i1 } [[TMP144]], 1 +// CHECK-NEXT: br i1 [[TMP146]], label [[CX_ATOMIC_EXIT11:%.*]], label [[CX_ATOMIC_CONT12:%.*]] +// CHECK: cx.atomic.cont12: +// CHECK-NEXT: store i8 [[TMP145]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT11]] +// CHECK: cx.atomic.exit11: +// CHECK-NEXT: [[TMP147:%.*]] = extractvalue { i8, i1 } [[TMP144]], 1 +// CHECK-NEXT: [[TMP148:%.*]] = zext i1 [[TMP147]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP148]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP149:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP150:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP151:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP149]], i8 [[TMP150]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP152:%.*]] = extractvalue { i8, i1 } [[TMP151]], 0 +// CHECK-NEXT: [[TMP153:%.*]] = extractvalue { i8, i1 } [[TMP151]], 1 +// CHECK-NEXT: br i1 [[TMP153]], label [[CX_ATOMIC_EXIT13:%.*]], label [[CX_ATOMIC_CONT14:%.*]] +// CHECK: cx.atomic.cont14: +// 
CHECK-NEXT: store i8 [[TMP152]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT13]] +// CHECK: cx.atomic.exit13: +// CHECK-NEXT: [[TMP154:%.*]] = extractvalue { i8, i1 } [[TMP151]], 1 +// CHECK-NEXT: [[TMP155:%.*]] = zext i1 [[TMP154]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP155]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP156:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP157:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP156]] acquire, align 1 +// CHECK-NEXT: store volatile i8 [[TMP157]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP158:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP159:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP158]] acquire, align 1 +// CHECK-NEXT: store volatile i8 [[TMP159]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP160:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP161:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP160]] acquire, align 1 +// CHECK-NEXT: store volatile i8 [[TMP161]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP162:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP163:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP162]] acquire, align 1 +// CHECK-NEXT: store volatile i8 [[TMP163]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP164:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP165:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP166:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP164]], i8 [[TMP165]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP167:%.*]] = extractvalue { i8, i1 } [[TMP166]], 0 +// CHECK-NEXT: store volatile i8 [[TMP167]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP168:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP169:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP170:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP168]], i8 [[TMP169]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP171:%.*]] = extractvalue { i8, i1 } [[TMP170]], 0 +// CHECK-NEXT: store volatile i8 [[TMP171]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP172:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP173:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP172]] acquire, align 1 +// CHECK-NEXT: [[TMP174:%.*]] = icmp ugt i8 [[TMP173]], [[TMP172]] +// CHECK-NEXT: [[TMP175:%.*]] = select i1 [[TMP174]], i8 [[TMP172]], i8 [[TMP173]] +// CHECK-NEXT: store volatile i8 [[TMP175]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP176:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP177:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP176]] acquire, align 1 +// CHECK-NEXT: [[TMP178:%.*]] = icmp ult i8 [[TMP177]], [[TMP176]] +// CHECK-NEXT: [[TMP179:%.*]] = select i1 [[TMP178]], i8 [[TMP176]], i8 [[TMP177]] +// CHECK-NEXT: store volatile i8 [[TMP179]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP180:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP181:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP180]] acquire, align 1 +// CHECK-NEXT: [[TMP182:%.*]] = icmp ult i8 [[TMP181]], [[TMP180]] +// CHECK-NEXT: [[TMP183:%.*]] = select i1 [[TMP182]], i8 [[TMP180]], i8 [[TMP181]] +// CHECK-NEXT: store volatile i8 [[TMP183]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP184:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP185:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP184]] acquire, align 1 +// CHECK-NEXT: [[TMP186:%.*]] = icmp ugt i8 [[TMP185]], [[TMP184]] +// CHECK-NEXT: [[TMP187:%.*]] = select i1 [[TMP186]], i8 [[TMP184]], i8 [[TMP185]] +// CHECK-NEXT: store volatile i8 [[TMP187]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP188:%.*]] = load i8, i8* [[CE]], align 1 +// 
CHECK-NEXT: [[TMP189:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP190:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP188]], i8 [[TMP189]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP191:%.*]] = extractvalue { i8, i1 } [[TMP190]], 0 +// CHECK-NEXT: [[TMP192:%.*]] = extractvalue { i8, i1 } [[TMP190]], 1 +// CHECK-NEXT: [[TMP193:%.*]] = select i1 [[TMP192]], i8 [[TMP188]], i8 [[TMP191]] +// CHECK-NEXT: store volatile i8 [[TMP193]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP194:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP195:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP196:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP194]], i8 [[TMP195]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP197:%.*]] = extractvalue { i8, i1 } [[TMP196]], 0 +// CHECK-NEXT: [[TMP198:%.*]] = extractvalue { i8, i1 } [[TMP196]], 1 +// CHECK-NEXT: [[TMP199:%.*]] = select i1 [[TMP198]], i8 [[TMP194]], i8 [[TMP197]] +// CHECK-NEXT: store volatile i8 [[TMP199]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP200:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP201:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP202:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP200]], i8 [[TMP201]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP203:%.*]] = extractvalue { i8, i1 } [[TMP202]], 0 +// CHECK-NEXT: [[TMP204:%.*]] = extractvalue { i8, i1 } [[TMP202]], 1 +// CHECK-NEXT: br i1 [[TMP204]], label [[CX_ATOMIC_EXIT15:%.*]], label [[CX_ATOMIC_CONT16:%.*]] +// CHECK: cx.atomic.cont16: +// CHECK-NEXT: store i8 [[TMP203]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT15]] +// CHECK: cx.atomic.exit15: +// CHECK-NEXT: [[TMP205:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP206:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP207:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP205]], i8 [[TMP206]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP208:%.*]] = extractvalue { i8, i1 } [[TMP207]], 0 +// CHECK-NEXT: [[TMP209:%.*]] = extractvalue { i8, i1 } [[TMP207]], 1 +// CHECK-NEXT: br i1 [[TMP209]], label [[CX_ATOMIC_EXIT17:%.*]], label [[CX_ATOMIC_CONT18:%.*]] +// CHECK: cx.atomic.cont18: +// CHECK-NEXT: store i8 [[TMP208]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT17]] +// CHECK: cx.atomic.exit17: +// CHECK-NEXT: [[TMP210:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP211:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP212:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP210]], i8 [[TMP211]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP213:%.*]] = extractvalue { i8, i1 } [[TMP212]], 1 +// CHECK-NEXT: [[TMP214:%.*]] = zext i1 [[TMP213]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP214]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP215:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP216:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP217:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP215]], i8 [[TMP216]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP218:%.*]] = extractvalue { i8, i1 } [[TMP217]], 1 +// CHECK-NEXT: [[TMP219:%.*]] = zext i1 [[TMP218]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP219]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP220:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP221:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP222:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP220]], i8 [[TMP221]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP223:%.*]] = extractvalue { i8, i1 } [[TMP222]], 0 +// CHECK-NEXT: [[TMP224:%.*]] = extractvalue { i8, i1 } [[TMP222]], 1 +// CHECK-NEXT: br i1 [[TMP224]], label [[CX_ATOMIC_EXIT19:%.*]], label 
[[CX_ATOMIC_CONT20:%.*]] +// CHECK: cx.atomic.cont20: +// CHECK-NEXT: store i8 [[TMP223]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT19]] +// CHECK: cx.atomic.exit19: +// CHECK-NEXT: [[TMP225:%.*]] = extractvalue { i8, i1 } [[TMP222]], 1 +// CHECK-NEXT: [[TMP226:%.*]] = zext i1 [[TMP225]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP226]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP227:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP228:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP229:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP227]], i8 [[TMP228]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP230:%.*]] = extractvalue { i8, i1 } [[TMP229]], 0 +// CHECK-NEXT: [[TMP231:%.*]] = extractvalue { i8, i1 } [[TMP229]], 1 +// CHECK-NEXT: br i1 [[TMP231]], label [[CX_ATOMIC_EXIT21:%.*]], label [[CX_ATOMIC_CONT22:%.*]] +// CHECK: cx.atomic.cont22: +// CHECK-NEXT: store i8 [[TMP230]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT21]] +// CHECK: cx.atomic.exit21: +// CHECK-NEXT: [[TMP232:%.*]] = extractvalue { i8, i1 } [[TMP229]], 1 +// CHECK-NEXT: [[TMP233:%.*]] = zext i1 [[TMP232]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP233]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP234:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP235:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP234]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP235]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP236:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP237:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP236]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP237]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP238:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP239:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP238]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP239]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP240:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP241:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP240]] monotonic, align 1 +// CHECK-NEXT: store volatile i8 [[TMP241]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP242:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP243:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP244:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP242]], i8 [[TMP243]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP245:%.*]] = extractvalue { i8, i1 } [[TMP244]], 0 +// CHECK-NEXT: store volatile i8 [[TMP245]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP246:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP247:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP248:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP246]], i8 [[TMP247]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP249:%.*]] = extractvalue { i8, i1 } [[TMP248]], 0 +// CHECK-NEXT: store volatile i8 [[TMP249]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP250:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP251:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP250]] monotonic, align 1 +// CHECK-NEXT: [[TMP252:%.*]] = icmp ugt i8 [[TMP251]], [[TMP250]] +// CHECK-NEXT: [[TMP253:%.*]] = select i1 [[TMP252]], i8 [[TMP250]], i8 [[TMP251]] +// CHECK-NEXT: store volatile i8 [[TMP253]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP254:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP255:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP254]] monotonic, align 1 +// CHECK-NEXT: [[TMP256:%.*]] = icmp ult i8 [[TMP255]], [[TMP254]] +// CHECK-NEXT: [[TMP257:%.*]] = select i1 [[TMP256]], i8 [[TMP254]], i8 [[TMP255]] +// CHECK-NEXT: store volatile i8 
[[TMP257]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP258:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP259:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP258]] monotonic, align 1 +// CHECK-NEXT: [[TMP260:%.*]] = icmp ult i8 [[TMP259]], [[TMP258]] +// CHECK-NEXT: [[TMP261:%.*]] = select i1 [[TMP260]], i8 [[TMP258]], i8 [[TMP259]] +// CHECK-NEXT: store volatile i8 [[TMP261]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP262:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP263:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP262]] monotonic, align 1 +// CHECK-NEXT: [[TMP264:%.*]] = icmp ugt i8 [[TMP263]], [[TMP262]] +// CHECK-NEXT: [[TMP265:%.*]] = select i1 [[TMP264]], i8 [[TMP262]], i8 [[TMP263]] +// CHECK-NEXT: store volatile i8 [[TMP265]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP266:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP267:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP268:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP266]], i8 [[TMP267]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP269:%.*]] = extractvalue { i8, i1 } [[TMP268]], 0 +// CHECK-NEXT: [[TMP270:%.*]] = extractvalue { i8, i1 } [[TMP268]], 1 +// CHECK-NEXT: [[TMP271:%.*]] = select i1 [[TMP270]], i8 [[TMP266]], i8 [[TMP269]] +// CHECK-NEXT: store volatile i8 [[TMP271]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP272:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP273:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP274:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP272]], i8 [[TMP273]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP275:%.*]] = extractvalue { i8, i1 } [[TMP274]], 0 +// CHECK-NEXT: [[TMP276:%.*]] = extractvalue { i8, i1 } [[TMP274]], 1 +// CHECK-NEXT: [[TMP277:%.*]] = select i1 [[TMP276]], i8 [[TMP272]], i8 [[TMP275]] +// CHECK-NEXT: store volatile i8 [[TMP277]], i8* [[CV]], align 1 +// CHECK-NEXT: [[TMP278:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP279:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP280:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP278]], i8 [[TMP279]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP281:%.*]] = extractvalue { i8, i1 } [[TMP280]], 0 +// CHECK-NEXT: [[TMP282:%.*]] = extractvalue { i8, i1 } [[TMP280]], 1 +// CHECK-NEXT: br i1 [[TMP282]], label [[CX_ATOMIC_EXIT23:%.*]], label [[CX_ATOMIC_CONT24:%.*]] +// CHECK: cx.atomic.cont24: +// CHECK-NEXT: store i8 [[TMP281]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT23]] +// CHECK: cx.atomic.exit23: +// CHECK-NEXT: [[TMP283:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP284:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP285:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP283]], i8 [[TMP284]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP286:%.*]] = extractvalue { i8, i1 } [[TMP285]], 0 +// CHECK-NEXT: [[TMP287:%.*]] = extractvalue { i8, i1 } [[TMP285]], 1 +// CHECK-NEXT: br i1 [[TMP287]], label [[CX_ATOMIC_EXIT25:%.*]], label [[CX_ATOMIC_CONT26:%.*]] +// CHECK: cx.atomic.cont26: +// CHECK-NEXT: store i8 [[TMP286]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT25]] +// CHECK: cx.atomic.exit25: +// CHECK-NEXT: [[TMP288:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP289:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP290:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP288]], i8 [[TMP289]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP291:%.*]] = extractvalue { i8, i1 } [[TMP290]], 1 +// CHECK-NEXT: [[TMP292:%.*]] = zext i1 [[TMP291]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP292]], i8* [[CR]], align 1 +// CHECK-NEXT: 
[[TMP293:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP294:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP295:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP293]], i8 [[TMP294]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP296:%.*]] = extractvalue { i8, i1 } [[TMP295]], 1 +// CHECK-NEXT: [[TMP297:%.*]] = zext i1 [[TMP296]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP297]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP298:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP299:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP300:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP298]], i8 [[TMP299]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP301:%.*]] = extractvalue { i8, i1 } [[TMP300]], 0 +// CHECK-NEXT: [[TMP302:%.*]] = extractvalue { i8, i1 } [[TMP300]], 1 +// CHECK-NEXT: br i1 [[TMP302]], label [[CX_ATOMIC_EXIT27:%.*]], label [[CX_ATOMIC_CONT28:%.*]] +// CHECK: cx.atomic.cont28: +// CHECK-NEXT: store i8 [[TMP301]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT27]] +// CHECK: cx.atomic.exit27: +// CHECK-NEXT: [[TMP303:%.*]] = extractvalue { i8, i1 } [[TMP300]], 1 +// CHECK-NEXT: [[TMP304:%.*]] = zext i1 [[TMP303]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP304]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP305:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP306:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP307:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP305]], i8 [[TMP306]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP308:%.*]] = extractvalue { i8, i1 } [[TMP307]], 0 +// CHECK-NEXT: [[TMP309:%.*]] = extractvalue { i8, i1 } [[TMP307]], 1 +// CHECK-NEXT: br i1 [[TMP309]], label [[CX_ATOMIC_EXIT29:%.*]], label [[CX_ATOMIC_CONT30:%.*]] +// CHECK: cx.atomic.cont30: +// CHECK-NEXT: store i8 [[TMP308]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT29]] +// CHECK: cx.atomic.exit29: +// CHECK-NEXT: [[TMP310:%.*]] = extractvalue { i8, i1 } [[TMP307]], 1 +// CHECK-NEXT: [[TMP311:%.*]] = zext i1 [[TMP310]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP311]], i8* [[CR]], align 1 +// CHECK-NEXT: [[TMP312:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP313:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP312]] release, align 1 +// CHECK-NEXT: store volatile i8 [[TMP313]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP314:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP315:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP314]] release, align 1 +// CHECK-NEXT: store volatile i8 [[TMP315]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP316:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP317:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP316]] release, align 1 +// CHECK-NEXT: store volatile i8 [[TMP317]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP318:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP319:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP318]] release, align 1 +// CHECK-NEXT: store volatile i8 [[TMP319]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP320:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP321:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP322:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP320]], i8 [[TMP321]] release monotonic, align 1 +// CHECK-NEXT: [[TMP323:%.*]] = extractvalue { i8, i1 } [[TMP322]], 0 +// CHECK-NEXT: store 
volatile i8 [[TMP323]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP324:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP325:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP326:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP324]], i8 [[TMP325]] release monotonic, align 1 +// CHECK-NEXT: [[TMP327:%.*]] = extractvalue { i8, i1 } [[TMP326]], 0 +// CHECK-NEXT: store volatile i8 [[TMP327]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP328:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP329:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP328]] release, align 1 +// CHECK-NEXT: [[TMP330:%.*]] = icmp ugt i8 [[TMP329]], [[TMP328]] +// CHECK-NEXT: [[TMP331:%.*]] = select i1 [[TMP330]], i8 [[TMP328]], i8 [[TMP329]] +// CHECK-NEXT: store volatile i8 [[TMP331]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP332:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP333:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP332]] release, align 1 +// CHECK-NEXT: [[TMP334:%.*]] = icmp ult i8 [[TMP333]], [[TMP332]] +// CHECK-NEXT: [[TMP335:%.*]] = select i1 [[TMP334]], i8 [[TMP332]], i8 [[TMP333]] +// CHECK-NEXT: store volatile i8 [[TMP335]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP336:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP337:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP336]] release, align 1 +// CHECK-NEXT: [[TMP338:%.*]] = icmp ult i8 [[TMP337]], [[TMP336]] +// CHECK-NEXT: [[TMP339:%.*]] = select i1 [[TMP338]], i8 [[TMP336]], i8 [[TMP337]] +// CHECK-NEXT: store volatile i8 [[TMP339]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP340:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP341:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP340]] release, align 1 +// CHECK-NEXT: [[TMP342:%.*]] = icmp ugt i8 [[TMP341]], [[TMP340]] +// CHECK-NEXT: [[TMP343:%.*]] = select i1 [[TMP342]], i8 [[TMP340]], i8 [[TMP341]] +// CHECK-NEXT: store volatile i8 [[TMP343]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP344:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP345:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP346:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP344]], i8 [[TMP345]] release monotonic, align 1 +// CHECK-NEXT: [[TMP347:%.*]] = extractvalue { i8, i1 } [[TMP346]], 0 +// CHECK-NEXT: [[TMP348:%.*]] = extractvalue { i8, i1 } [[TMP346]], 1 +// CHECK-NEXT: [[TMP349:%.*]] = select i1 [[TMP348]], i8 [[TMP344]], i8 [[TMP347]] +// CHECK-NEXT: store volatile i8 [[TMP349]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP350:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP351:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP352:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP350]], i8 [[TMP351]] release monotonic, align 1 +// CHECK-NEXT: [[TMP353:%.*]] = extractvalue { i8, i1 } [[TMP352]], 0 +// CHECK-NEXT: [[TMP354:%.*]] = extractvalue { i8, i1 } [[TMP352]], 1 +// CHECK-NEXT: [[TMP355:%.*]] = select i1 [[TMP354]], i8 [[TMP350]], i8 [[TMP353]] +// CHECK-NEXT: store volatile i8 [[TMP355]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP356:%.*]] = load i8, i8* [[CE]], 
align 1 +// CHECK-NEXT: [[TMP357:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP358:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP356]], i8 [[TMP357]] release monotonic, align 1 +// CHECK-NEXT: [[TMP359:%.*]] = extractvalue { i8, i1 } [[TMP358]], 0 +// CHECK-NEXT: [[TMP360:%.*]] = extractvalue { i8, i1 } [[TMP358]], 1 +// CHECK-NEXT: br i1 [[TMP360]], label [[CX_ATOMIC_EXIT31:%.*]], label [[CX_ATOMIC_CONT32:%.*]] +// CHECK: cx.atomic.cont32: +// CHECK-NEXT: store i8 [[TMP359]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT31]] +// CHECK: cx.atomic.exit31: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP361:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP362:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP363:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP361]], i8 [[TMP362]] release monotonic, align 1 +// CHECK-NEXT: [[TMP364:%.*]] = extractvalue { i8, i1 } [[TMP363]], 0 +// CHECK-NEXT: [[TMP365:%.*]] = extractvalue { i8, i1 } [[TMP363]], 1 +// CHECK-NEXT: br i1 [[TMP365]], label [[CX_ATOMIC_EXIT33:%.*]], label [[CX_ATOMIC_CONT34:%.*]] +// CHECK: cx.atomic.cont34: +// CHECK-NEXT: store i8 [[TMP364]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT33]] +// CHECK: cx.atomic.exit33: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP366:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP367:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP368:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP366]], i8 [[TMP367]] release monotonic, align 1 +// CHECK-NEXT: [[TMP369:%.*]] = extractvalue { i8, i1 } [[TMP368]], 1 +// CHECK-NEXT: [[TMP370:%.*]] = zext i1 [[TMP369]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP370]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP371:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP372:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP373:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP371]], i8 [[TMP372]] release monotonic, align 1 +// CHECK-NEXT: [[TMP374:%.*]] = extractvalue { i8, i1 } [[TMP373]], 1 +// CHECK-NEXT: [[TMP375:%.*]] = zext i1 [[TMP374]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP375]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP376:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP377:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP378:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP376]], i8 [[TMP377]] release monotonic, align 1 +// CHECK-NEXT: [[TMP379:%.*]] = extractvalue { i8, i1 } [[TMP378]], 0 +// CHECK-NEXT: [[TMP380:%.*]] = extractvalue { i8, i1 } [[TMP378]], 1 +// CHECK-NEXT: br i1 [[TMP380]], label [[CX_ATOMIC_EXIT35:%.*]], label [[CX_ATOMIC_CONT36:%.*]] +// CHECK: cx.atomic.cont36: +// CHECK-NEXT: store i8 [[TMP379]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT35]] +// CHECK: cx.atomic.exit35: +// CHECK-NEXT: [[TMP381:%.*]] = extractvalue { i8, i1 } [[TMP378]], 1 +// CHECK-NEXT: [[TMP382:%.*]] = zext i1 [[TMP381]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP382]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP383:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP384:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP385:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP383]], i8 [[TMP384]] release monotonic, align 1 +// CHECK-NEXT: [[TMP386:%.*]] = extractvalue { i8, i1 } [[TMP385]], 0 +// 
CHECK-NEXT: [[TMP387:%.*]] = extractvalue { i8, i1 } [[TMP385]], 1 +// CHECK-NEXT: br i1 [[TMP387]], label [[CX_ATOMIC_EXIT37:%.*]], label [[CX_ATOMIC_CONT38:%.*]] +// CHECK: cx.atomic.cont38: +// CHECK-NEXT: store i8 [[TMP386]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT37]] +// CHECK: cx.atomic.exit37: +// CHECK-NEXT: [[TMP388:%.*]] = extractvalue { i8, i1 } [[TMP385]], 1 +// CHECK-NEXT: [[TMP389:%.*]] = zext i1 [[TMP388]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP389]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP390:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP391:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP390]] seq_cst, align 1 +// CHECK-NEXT: store volatile i8 [[TMP391]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP392:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP393:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP392]] seq_cst, align 1 +// CHECK-NEXT: store volatile i8 [[TMP393]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP394:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP395:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP394]] seq_cst, align 1 +// CHECK-NEXT: store volatile i8 [[TMP395]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP396:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP397:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP396]] seq_cst, align 1 +// CHECK-NEXT: store volatile i8 [[TMP397]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP398:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP399:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP400:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP398]], i8 [[TMP399]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP401:%.*]] = extractvalue { i8, i1 } [[TMP400]], 0 +// CHECK-NEXT: store volatile i8 [[TMP401]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP402:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP403:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP404:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP402]], i8 [[TMP403]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP405:%.*]] = extractvalue { i8, i1 } [[TMP404]], 0 +// CHECK-NEXT: store volatile i8 [[TMP405]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP406:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP407:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP406]] seq_cst, align 1 +// CHECK-NEXT: [[TMP408:%.*]] = icmp ugt i8 [[TMP407]], [[TMP406]] +// CHECK-NEXT: [[TMP409:%.*]] = select i1 [[TMP408]], i8 [[TMP406]], i8 [[TMP407]] +// CHECK-NEXT: store volatile i8 [[TMP409]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP410:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP411:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP410]] seq_cst, align 1 +// CHECK-NEXT: [[TMP412:%.*]] = icmp ult i8 [[TMP411]], [[TMP410]] +// CHECK-NEXT: [[TMP413:%.*]] = select i1 [[TMP412]], i8 [[TMP410]], i8 [[TMP411]] +// CHECK-NEXT: store volatile i8 [[TMP413]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP414:%.*]] = load i8, i8* [[CE]], align 
1 +// CHECK-NEXT: [[TMP415:%.*]] = atomicrmw umin i8* [[CX]], i8 [[TMP414]] seq_cst, align 1 +// CHECK-NEXT: [[TMP416:%.*]] = icmp ult i8 [[TMP415]], [[TMP414]] +// CHECK-NEXT: [[TMP417:%.*]] = select i1 [[TMP416]], i8 [[TMP414]], i8 [[TMP415]] +// CHECK-NEXT: store volatile i8 [[TMP417]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP418:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP419:%.*]] = atomicrmw umax i8* [[CX]], i8 [[TMP418]] seq_cst, align 1 +// CHECK-NEXT: [[TMP420:%.*]] = icmp ugt i8 [[TMP419]], [[TMP418]] +// CHECK-NEXT: [[TMP421:%.*]] = select i1 [[TMP420]], i8 [[TMP418]], i8 [[TMP419]] +// CHECK-NEXT: store volatile i8 [[TMP421]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP422:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP423:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP424:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP422]], i8 [[TMP423]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP425:%.*]] = extractvalue { i8, i1 } [[TMP424]], 0 +// CHECK-NEXT: [[TMP426:%.*]] = extractvalue { i8, i1 } [[TMP424]], 1 +// CHECK-NEXT: [[TMP427:%.*]] = select i1 [[TMP426]], i8 [[TMP422]], i8 [[TMP425]] +// CHECK-NEXT: store volatile i8 [[TMP427]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP428:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP429:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP430:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP428]], i8 [[TMP429]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP431:%.*]] = extractvalue { i8, i1 } [[TMP430]], 0 +// CHECK-NEXT: [[TMP432:%.*]] = extractvalue { i8, i1 } [[TMP430]], 1 +// CHECK-NEXT: [[TMP433:%.*]] = select i1 [[TMP432]], i8 [[TMP428]], i8 [[TMP431]] +// CHECK-NEXT: store volatile i8 [[TMP433]], i8* [[CV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP434:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP435:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP436:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP434]], i8 [[TMP435]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP437:%.*]] = extractvalue { i8, i1 } [[TMP436]], 0 +// CHECK-NEXT: [[TMP438:%.*]] = extractvalue { i8, i1 } [[TMP436]], 1 +// CHECK-NEXT: br i1 [[TMP438]], label [[CX_ATOMIC_EXIT39:%.*]], label [[CX_ATOMIC_CONT40:%.*]] +// CHECK: cx.atomic.cont40: +// CHECK-NEXT: store i8 [[TMP437]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT39]] +// CHECK: cx.atomic.exit39: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP439:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP440:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP441:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP439]], i8 [[TMP440]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP442:%.*]] = extractvalue { i8, i1 } [[TMP441]], 0 +// CHECK-NEXT: [[TMP443:%.*]] = extractvalue { i8, i1 } [[TMP441]], 1 +// CHECK-NEXT: br i1 [[TMP443]], label [[CX_ATOMIC_EXIT41:%.*]], label [[CX_ATOMIC_CONT42:%.*]] +// CHECK: cx.atomic.cont42: +// CHECK-NEXT: store i8 [[TMP442]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT41]] +// CHECK: cx.atomic.exit41: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP444:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP445:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: 
[[TMP446:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP444]], i8 [[TMP445]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP447:%.*]] = extractvalue { i8, i1 } [[TMP446]], 1 +// CHECK-NEXT: [[TMP448:%.*]] = zext i1 [[TMP447]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP448]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP449:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP450:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP451:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP449]], i8 [[TMP450]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP452:%.*]] = extractvalue { i8, i1 } [[TMP451]], 1 +// CHECK-NEXT: [[TMP453:%.*]] = zext i1 [[TMP452]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP453]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP454:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP455:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP456:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP454]], i8 [[TMP455]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP457:%.*]] = extractvalue { i8, i1 } [[TMP456]], 0 +// CHECK-NEXT: [[TMP458:%.*]] = extractvalue { i8, i1 } [[TMP456]], 1 +// CHECK-NEXT: br i1 [[TMP458]], label [[CX_ATOMIC_EXIT43:%.*]], label [[CX_ATOMIC_CONT44:%.*]] +// CHECK: cx.atomic.cont44: +// CHECK-NEXT: store i8 [[TMP457]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT43]] +// CHECK: cx.atomic.exit43: +// CHECK-NEXT: [[TMP459:%.*]] = extractvalue { i8, i1 } [[TMP456]], 1 +// CHECK-NEXT: [[TMP460:%.*]] = zext i1 [[TMP459]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP460]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP461:%.*]] = load i8, i8* [[CE]], align 1 +// CHECK-NEXT: [[TMP462:%.*]] = load i8, i8* [[CD]], align 1 +// CHECK-NEXT: [[TMP463:%.*]] = cmpxchg i8* [[CX]], i8 [[TMP461]], i8 [[TMP462]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP464:%.*]] = extractvalue { i8, i1 } [[TMP463]], 0 +// CHECK-NEXT: [[TMP465:%.*]] = extractvalue { i8, i1 } [[TMP463]], 1 +// CHECK-NEXT: br i1 [[TMP465]], label [[CX_ATOMIC_EXIT45:%.*]], label [[CX_ATOMIC_CONT46:%.*]] +// CHECK: cx.atomic.cont46: +// CHECK-NEXT: store i8 [[TMP464]], i8* [[CV]], align 1 +// CHECK-NEXT: br label [[CX_ATOMIC_EXIT45]] +// CHECK: cx.atomic.exit45: +// CHECK-NEXT: [[TMP466:%.*]] = extractvalue { i8, i1 } [[TMP463]], 1 +// CHECK-NEXT: [[TMP467:%.*]] = zext i1 [[TMP466]] to i8 +// CHECK-NEXT: store volatile i8 [[TMP467]], i8* [[CR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP468:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP469:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP468]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP469]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP470:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP471:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP470]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP471]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP472:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP473:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP472]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP473]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP474:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP475:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP474]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP475]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP476:%.*]] = load i8, i8* [[UCE]], 
align 1 +// CHECK-NEXT: [[TMP477:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP478:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP476]], i8 [[TMP477]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP479:%.*]] = extractvalue { i8, i1 } [[TMP478]], 0 +// CHECK-NEXT: store i8 [[TMP479]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP480:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP481:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP482:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP480]], i8 [[TMP481]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP483:%.*]] = extractvalue { i8, i1 } [[TMP482]], 0 +// CHECK-NEXT: store i8 [[TMP483]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP484:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP485:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP484]] monotonic, align 1 +// CHECK-NEXT: [[TMP486:%.*]] = icmp ugt i8 [[TMP485]], [[TMP484]] +// CHECK-NEXT: [[TMP487:%.*]] = select i1 [[TMP486]], i8 [[TMP484]], i8 [[TMP485]] +// CHECK-NEXT: store i8 [[TMP487]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP488:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP489:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP488]] monotonic, align 1 +// CHECK-NEXT: [[TMP490:%.*]] = icmp ult i8 [[TMP489]], [[TMP488]] +// CHECK-NEXT: [[TMP491:%.*]] = select i1 [[TMP490]], i8 [[TMP488]], i8 [[TMP489]] +// CHECK-NEXT: store i8 [[TMP491]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP492:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP493:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP492]] monotonic, align 1 +// CHECK-NEXT: [[TMP494:%.*]] = icmp ult i8 [[TMP493]], [[TMP492]] +// CHECK-NEXT: [[TMP495:%.*]] = select i1 [[TMP494]], i8 [[TMP492]], i8 [[TMP493]] +// CHECK-NEXT: store i8 [[TMP495]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP496:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP497:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP496]] monotonic, align 1 +// CHECK-NEXT: [[TMP498:%.*]] = icmp ugt i8 [[TMP497]], [[TMP496]] +// CHECK-NEXT: [[TMP499:%.*]] = select i1 [[TMP498]], i8 [[TMP496]], i8 [[TMP497]] +// CHECK-NEXT: store i8 [[TMP499]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP500:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP501:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP502:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP500]], i8 [[TMP501]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP503:%.*]] = extractvalue { i8, i1 } [[TMP502]], 0 +// CHECK-NEXT: [[TMP504:%.*]] = extractvalue { i8, i1 } [[TMP502]], 1 +// CHECK-NEXT: [[TMP505:%.*]] = select i1 [[TMP504]], i8 [[TMP500]], i8 [[TMP503]] +// CHECK-NEXT: store i8 [[TMP505]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP506:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP507:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP508:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP506]], i8 [[TMP507]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP509:%.*]] = extractvalue { i8, i1 } [[TMP508]], 0 +// CHECK-NEXT: [[TMP510:%.*]] = extractvalue { i8, i1 } [[TMP508]], 1 +// CHECK-NEXT: [[TMP511:%.*]] = select i1 [[TMP510]], i8 [[TMP506]], i8 [[TMP509]] +// CHECK-NEXT: store i8 [[TMP511]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP512:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP513:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP514:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP512]], i8 [[TMP513]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP515:%.*]] = extractvalue { i8, i1 } [[TMP514]], 0 +// CHECK-NEXT: [[TMP516:%.*]] = extractvalue { i8, i1 
} [[TMP514]], 1 +// CHECK-NEXT: br i1 [[TMP516]], label [[UCX_ATOMIC_EXIT:%.*]], label [[UCX_ATOMIC_CONT:%.*]] +// CHECK: ucx.atomic.cont: +// CHECK-NEXT: store i8 [[TMP515]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT]] +// CHECK: ucx.atomic.exit: +// CHECK-NEXT: [[TMP517:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP518:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP519:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP517]], i8 [[TMP518]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP520:%.*]] = extractvalue { i8, i1 } [[TMP519]], 0 +// CHECK-NEXT: [[TMP521:%.*]] = extractvalue { i8, i1 } [[TMP519]], 1 +// CHECK-NEXT: br i1 [[TMP521]], label [[UCX_ATOMIC_EXIT47:%.*]], label [[UCX_ATOMIC_CONT48:%.*]] +// CHECK: ucx.atomic.cont48: +// CHECK-NEXT: store i8 [[TMP520]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT47]] +// CHECK: ucx.atomic.exit47: +// CHECK-NEXT: [[TMP522:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP523:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP524:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP522]], i8 [[TMP523]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP525:%.*]] = extractvalue { i8, i1 } [[TMP524]], 1 +// CHECK-NEXT: [[TMP526:%.*]] = zext i1 [[TMP525]] to i8 +// CHECK-NEXT: store i8 [[TMP526]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP527:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP528:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP529:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP527]], i8 [[TMP528]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP530:%.*]] = extractvalue { i8, i1 } [[TMP529]], 1 +// CHECK-NEXT: [[TMP531:%.*]] = zext i1 [[TMP530]] to i8 +// CHECK-NEXT: store i8 [[TMP531]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP532:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP533:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP534:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP532]], i8 [[TMP533]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP535:%.*]] = extractvalue { i8, i1 } [[TMP534]], 0 +// CHECK-NEXT: [[TMP536:%.*]] = extractvalue { i8, i1 } [[TMP534]], 1 +// CHECK-NEXT: br i1 [[TMP536]], label [[UCX_ATOMIC_EXIT49:%.*]], label [[UCX_ATOMIC_CONT50:%.*]] +// CHECK: ucx.atomic.cont50: +// CHECK-NEXT: store i8 [[TMP535]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT49]] +// CHECK: ucx.atomic.exit49: +// CHECK-NEXT: [[TMP537:%.*]] = extractvalue { i8, i1 } [[TMP534]], 1 +// CHECK-NEXT: [[TMP538:%.*]] = zext i1 [[TMP537]] to i8 +// CHECK-NEXT: store i8 [[TMP538]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP539:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP540:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP541:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP539]], i8 [[TMP540]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP542:%.*]] = extractvalue { i8, i1 } [[TMP541]], 0 +// CHECK-NEXT: [[TMP543:%.*]] = extractvalue { i8, i1 } [[TMP541]], 1 +// CHECK-NEXT: br i1 [[TMP543]], label [[UCX_ATOMIC_EXIT51:%.*]], label [[UCX_ATOMIC_CONT52:%.*]] +// CHECK: ucx.atomic.cont52: +// CHECK-NEXT: store i8 [[TMP542]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT51]] +// CHECK: ucx.atomic.exit51: +// CHECK-NEXT: [[TMP544:%.*]] = extractvalue { i8, i1 } [[TMP541]], 1 +// CHECK-NEXT: [[TMP545:%.*]] = zext i1 [[TMP544]] to i8 +// CHECK-NEXT: store i8 [[TMP545]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP546:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP547:%.*]] = atomicrmw umax i8* [[UCX]], i8 
[[TMP546]] acq_rel, align 1 +// CHECK-NEXT: store i8 [[TMP547]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP548:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP549:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP548]] acq_rel, align 1 +// CHECK-NEXT: store i8 [[TMP549]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP550:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP551:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP550]] acq_rel, align 1 +// CHECK-NEXT: store i8 [[TMP551]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP552:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP553:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP552]] acq_rel, align 1 +// CHECK-NEXT: store i8 [[TMP553]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP554:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP555:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP556:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP554]], i8 [[TMP555]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP557:%.*]] = extractvalue { i8, i1 } [[TMP556]], 0 +// CHECK-NEXT: store i8 [[TMP557]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP558:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP559:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP560:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP558]], i8 [[TMP559]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP561:%.*]] = extractvalue { i8, i1 } [[TMP560]], 0 +// CHECK-NEXT: store i8 [[TMP561]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP562:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP563:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP562]] acq_rel, align 1 +// CHECK-NEXT: [[TMP564:%.*]] = icmp ugt i8 [[TMP563]], [[TMP562]] +// CHECK-NEXT: [[TMP565:%.*]] = select i1 [[TMP564]], i8 [[TMP562]], i8 [[TMP563]] +// CHECK-NEXT: store i8 [[TMP565]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP566:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP567:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP566]] acq_rel, align 1 +// CHECK-NEXT: [[TMP568:%.*]] = icmp ult i8 [[TMP567]], [[TMP566]] +// CHECK-NEXT: [[TMP569:%.*]] = select i1 [[TMP568]], i8 [[TMP566]], i8 [[TMP567]] +// CHECK-NEXT: store i8 [[TMP569]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP570:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP571:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP570]] acq_rel, align 1 +// CHECK-NEXT: [[TMP572:%.*]] = icmp ult i8 [[TMP571]], [[TMP570]] +// CHECK-NEXT: [[TMP573:%.*]] = select i1 [[TMP572]], i8 [[TMP570]], i8 [[TMP571]] +// CHECK-NEXT: store i8 [[TMP573]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP574:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP575:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP574]] acq_rel, align 1 +// CHECK-NEXT: [[TMP576:%.*]] = icmp ugt i8 [[TMP575]], [[TMP574]] +// CHECK-NEXT: [[TMP577:%.*]] = select i1 [[TMP576]], i8 [[TMP574]], i8 [[TMP575]] +// CHECK-NEXT: store i8 [[TMP577]], i8* [[UCV]], align 1 +// CHECK-NEXT: call 
void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP578:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP579:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP580:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP578]], i8 [[TMP579]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP581:%.*]] = extractvalue { i8, i1 } [[TMP580]], 0 +// CHECK-NEXT: [[TMP582:%.*]] = extractvalue { i8, i1 } [[TMP580]], 1 +// CHECK-NEXT: [[TMP583:%.*]] = select i1 [[TMP582]], i8 [[TMP578]], i8 [[TMP581]] +// CHECK-NEXT: store i8 [[TMP583]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP584:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP585:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP586:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP584]], i8 [[TMP585]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP587:%.*]] = extractvalue { i8, i1 } [[TMP586]], 0 +// CHECK-NEXT: [[TMP588:%.*]] = extractvalue { i8, i1 } [[TMP586]], 1 +// CHECK-NEXT: [[TMP589:%.*]] = select i1 [[TMP588]], i8 [[TMP584]], i8 [[TMP587]] +// CHECK-NEXT: store i8 [[TMP589]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP590:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP591:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP592:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP590]], i8 [[TMP591]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP593:%.*]] = extractvalue { i8, i1 } [[TMP592]], 0 +// CHECK-NEXT: [[TMP594:%.*]] = extractvalue { i8, i1 } [[TMP592]], 1 +// CHECK-NEXT: br i1 [[TMP594]], label [[UCX_ATOMIC_EXIT53:%.*]], label [[UCX_ATOMIC_CONT54:%.*]] +// CHECK: ucx.atomic.cont54: +// CHECK-NEXT: store i8 [[TMP593]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT53]] +// CHECK: ucx.atomic.exit53: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP595:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP596:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP597:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP595]], i8 [[TMP596]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP598:%.*]] = extractvalue { i8, i1 } [[TMP597]], 0 +// CHECK-NEXT: [[TMP599:%.*]] = extractvalue { i8, i1 } [[TMP597]], 1 +// CHECK-NEXT: br i1 [[TMP599]], label [[UCX_ATOMIC_EXIT55:%.*]], label [[UCX_ATOMIC_CONT56:%.*]] +// CHECK: ucx.atomic.cont56: +// CHECK-NEXT: store i8 [[TMP598]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT55]] +// CHECK: ucx.atomic.exit55: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP600:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP601:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP602:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP600]], i8 [[TMP601]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP603:%.*]] = extractvalue { i8, i1 } [[TMP602]], 1 +// CHECK-NEXT: [[TMP604:%.*]] = zext i1 [[TMP603]] to i8 +// CHECK-NEXT: store i8 [[TMP604]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP605:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP606:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP607:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP605]], i8 [[TMP606]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP608:%.*]] = extractvalue { i8, i1 } [[TMP607]], 1 +// CHECK-NEXT: [[TMP609:%.*]] = zext i1 [[TMP608]] to i8 +// CHECK-NEXT: store i8 [[TMP609]], i8* [[UCR]], align 1 +// 
CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP610:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP611:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP612:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP610]], i8 [[TMP611]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP613:%.*]] = extractvalue { i8, i1 } [[TMP612]], 0 +// CHECK-NEXT: [[TMP614:%.*]] = extractvalue { i8, i1 } [[TMP612]], 1 +// CHECK-NEXT: br i1 [[TMP614]], label [[UCX_ATOMIC_EXIT57:%.*]], label [[UCX_ATOMIC_CONT58:%.*]] +// CHECK: ucx.atomic.cont58: +// CHECK-NEXT: store i8 [[TMP613]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT57]] +// CHECK: ucx.atomic.exit57: +// CHECK-NEXT: [[TMP615:%.*]] = extractvalue { i8, i1 } [[TMP612]], 1 +// CHECK-NEXT: [[TMP616:%.*]] = zext i1 [[TMP615]] to i8 +// CHECK-NEXT: store i8 [[TMP616]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP617:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP618:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP619:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP617]], i8 [[TMP618]] acq_rel acquire, align 1 +// CHECK-NEXT: [[TMP620:%.*]] = extractvalue { i8, i1 } [[TMP619]], 0 +// CHECK-NEXT: [[TMP621:%.*]] = extractvalue { i8, i1 } [[TMP619]], 1 +// CHECK-NEXT: br i1 [[TMP621]], label [[UCX_ATOMIC_EXIT59:%.*]], label [[UCX_ATOMIC_CONT60:%.*]] +// CHECK: ucx.atomic.cont60: +// CHECK-NEXT: store i8 [[TMP620]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT59]] +// CHECK: ucx.atomic.exit59: +// CHECK-NEXT: [[TMP622:%.*]] = extractvalue { i8, i1 } [[TMP619]], 1 +// CHECK-NEXT: [[TMP623:%.*]] = zext i1 [[TMP622]] to i8 +// CHECK-NEXT: store i8 [[TMP623]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP624:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP625:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP624]] acquire, align 1 +// CHECK-NEXT: store i8 [[TMP625]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP626:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP627:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP626]] acquire, align 1 +// CHECK-NEXT: store i8 [[TMP627]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP628:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP629:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP628]] acquire, align 1 +// CHECK-NEXT: store i8 [[TMP629]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP630:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP631:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP630]] acquire, align 1 +// CHECK-NEXT: store i8 [[TMP631]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP632:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP633:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP634:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP632]], i8 [[TMP633]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP635:%.*]] = extractvalue { i8, i1 } [[TMP634]], 0 +// CHECK-NEXT: store i8 [[TMP635]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP636:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP637:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP638:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP636]], i8 [[TMP637]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP639:%.*]] = extractvalue { i8, i1 } [[TMP638]], 0 +// CHECK-NEXT: store i8 [[TMP639]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP640:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP641:%.*]] = atomicrmw umax i8* 
[[UCX]], i8 [[TMP640]] acquire, align 1 +// CHECK-NEXT: [[TMP642:%.*]] = icmp ugt i8 [[TMP641]], [[TMP640]] +// CHECK-NEXT: [[TMP643:%.*]] = select i1 [[TMP642]], i8 [[TMP640]], i8 [[TMP641]] +// CHECK-NEXT: store i8 [[TMP643]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP644:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP645:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP644]] acquire, align 1 +// CHECK-NEXT: [[TMP646:%.*]] = icmp ult i8 [[TMP645]], [[TMP644]] +// CHECK-NEXT: [[TMP647:%.*]] = select i1 [[TMP646]], i8 [[TMP644]], i8 [[TMP645]] +// CHECK-NEXT: store i8 [[TMP647]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP648:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP649:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP648]] acquire, align 1 +// CHECK-NEXT: [[TMP650:%.*]] = icmp ult i8 [[TMP649]], [[TMP648]] +// CHECK-NEXT: [[TMP651:%.*]] = select i1 [[TMP650]], i8 [[TMP648]], i8 [[TMP649]] +// CHECK-NEXT: store i8 [[TMP651]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP652:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP653:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP652]] acquire, align 1 +// CHECK-NEXT: [[TMP654:%.*]] = icmp ugt i8 [[TMP653]], [[TMP652]] +// CHECK-NEXT: [[TMP655:%.*]] = select i1 [[TMP654]], i8 [[TMP652]], i8 [[TMP653]] +// CHECK-NEXT: store i8 [[TMP655]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP656:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP657:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP658:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP656]], i8 [[TMP657]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP659:%.*]] = extractvalue { i8, i1 } [[TMP658]], 0 +// CHECK-NEXT: [[TMP660:%.*]] = extractvalue { i8, i1 } [[TMP658]], 1 +// CHECK-NEXT: [[TMP661:%.*]] = select i1 [[TMP660]], i8 [[TMP656]], i8 [[TMP659]] +// CHECK-NEXT: store i8 [[TMP661]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP662:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP663:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP664:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP662]], i8 [[TMP663]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP665:%.*]] = extractvalue { i8, i1 } [[TMP664]], 0 +// CHECK-NEXT: [[TMP666:%.*]] = extractvalue { i8, i1 } [[TMP664]], 1 +// CHECK-NEXT: [[TMP667:%.*]] = select i1 [[TMP666]], i8 [[TMP662]], i8 [[TMP665]] +// CHECK-NEXT: store i8 [[TMP667]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP668:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP669:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP670:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP668]], i8 [[TMP669]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP671:%.*]] = extractvalue { i8, i1 } [[TMP670]], 0 +// CHECK-NEXT: [[TMP672:%.*]] = extractvalue { i8, i1 } [[TMP670]], 1 +// CHECK-NEXT: br i1 [[TMP672]], label [[UCX_ATOMIC_EXIT61:%.*]], label [[UCX_ATOMIC_CONT62:%.*]] +// CHECK: ucx.atomic.cont62: +// CHECK-NEXT: store i8 [[TMP671]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT61]] +// CHECK: ucx.atomic.exit61: +// CHECK-NEXT: [[TMP673:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP674:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP675:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP673]], i8 [[TMP674]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP676:%.*]] = extractvalue { i8, i1 } [[TMP675]], 0 +// CHECK-NEXT: [[TMP677:%.*]] = extractvalue { i8, i1 } [[TMP675]], 1 +// CHECK-NEXT: br i1 [[TMP677]], label [[UCX_ATOMIC_EXIT63:%.*]], label [[UCX_ATOMIC_CONT64:%.*]] +// CHECK: ucx.atomic.cont64: +// CHECK-NEXT: store i8 
[[TMP676]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT63]] +// CHECK: ucx.atomic.exit63: +// CHECK-NEXT: [[TMP678:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP679:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP680:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP678]], i8 [[TMP679]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP681:%.*]] = extractvalue { i8, i1 } [[TMP680]], 1 +// CHECK-NEXT: [[TMP682:%.*]] = zext i1 [[TMP681]] to i8 +// CHECK-NEXT: store i8 [[TMP682]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP683:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP684:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP685:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP683]], i8 [[TMP684]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP686:%.*]] = extractvalue { i8, i1 } [[TMP685]], 1 +// CHECK-NEXT: [[TMP687:%.*]] = zext i1 [[TMP686]] to i8 +// CHECK-NEXT: store i8 [[TMP687]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP688:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP689:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP690:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP688]], i8 [[TMP689]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP691:%.*]] = extractvalue { i8, i1 } [[TMP690]], 0 +// CHECK-NEXT: [[TMP692:%.*]] = extractvalue { i8, i1 } [[TMP690]], 1 +// CHECK-NEXT: br i1 [[TMP692]], label [[UCX_ATOMIC_EXIT65:%.*]], label [[UCX_ATOMIC_CONT66:%.*]] +// CHECK: ucx.atomic.cont66: +// CHECK-NEXT: store i8 [[TMP691]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT65]] +// CHECK: ucx.atomic.exit65: +// CHECK-NEXT: [[TMP693:%.*]] = extractvalue { i8, i1 } [[TMP690]], 1 +// CHECK-NEXT: [[TMP694:%.*]] = zext i1 [[TMP693]] to i8 +// CHECK-NEXT: store i8 [[TMP694]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP695:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP696:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP697:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP695]], i8 [[TMP696]] acquire acquire, align 1 +// CHECK-NEXT: [[TMP698:%.*]] = extractvalue { i8, i1 } [[TMP697]], 0 +// CHECK-NEXT: [[TMP699:%.*]] = extractvalue { i8, i1 } [[TMP697]], 1 +// CHECK-NEXT: br i1 [[TMP699]], label [[UCX_ATOMIC_EXIT67:%.*]], label [[UCX_ATOMIC_CONT68:%.*]] +// CHECK: ucx.atomic.cont68: +// CHECK-NEXT: store i8 [[TMP698]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT67]] +// CHECK: ucx.atomic.exit67: +// CHECK-NEXT: [[TMP700:%.*]] = extractvalue { i8, i1 } [[TMP697]], 1 +// CHECK-NEXT: [[TMP701:%.*]] = zext i1 [[TMP700]] to i8 +// CHECK-NEXT: store i8 [[TMP701]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP702:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP703:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP702]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP703]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP704:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP705:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP704]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP705]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP706:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP707:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP706]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP707]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP708:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP709:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP708]] monotonic, align 1 +// CHECK-NEXT: store i8 [[TMP709]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP710:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: 
[[TMP711:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP712:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP710]], i8 [[TMP711]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP713:%.*]] = extractvalue { i8, i1 } [[TMP712]], 0 +// CHECK-NEXT: store i8 [[TMP713]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP714:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP715:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP716:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP714]], i8 [[TMP715]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP717:%.*]] = extractvalue { i8, i1 } [[TMP716]], 0 +// CHECK-NEXT: store i8 [[TMP717]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP718:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP719:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP718]] monotonic, align 1 +// CHECK-NEXT: [[TMP720:%.*]] = icmp ugt i8 [[TMP719]], [[TMP718]] +// CHECK-NEXT: [[TMP721:%.*]] = select i1 [[TMP720]], i8 [[TMP718]], i8 [[TMP719]] +// CHECK-NEXT: store i8 [[TMP721]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP722:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP723:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP722]] monotonic, align 1 +// CHECK-NEXT: [[TMP724:%.*]] = icmp ult i8 [[TMP723]], [[TMP722]] +// CHECK-NEXT: [[TMP725:%.*]] = select i1 [[TMP724]], i8 [[TMP722]], i8 [[TMP723]] +// CHECK-NEXT: store i8 [[TMP725]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP726:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP727:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP726]] monotonic, align 1 +// CHECK-NEXT: [[TMP728:%.*]] = icmp ult i8 [[TMP727]], [[TMP726]] +// CHECK-NEXT: [[TMP729:%.*]] = select i1 [[TMP728]], i8 [[TMP726]], i8 [[TMP727]] +// CHECK-NEXT: store i8 [[TMP729]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP730:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP731:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP730]] monotonic, align 1 +// CHECK-NEXT: [[TMP732:%.*]] = icmp ugt i8 [[TMP731]], [[TMP730]] +// CHECK-NEXT: [[TMP733:%.*]] = select i1 [[TMP732]], i8 [[TMP730]], i8 [[TMP731]] +// CHECK-NEXT: store i8 [[TMP733]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP734:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP735:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP736:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP734]], i8 [[TMP735]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP737:%.*]] = extractvalue { i8, i1 } [[TMP736]], 0 +// CHECK-NEXT: [[TMP738:%.*]] = extractvalue { i8, i1 } [[TMP736]], 1 +// CHECK-NEXT: [[TMP739:%.*]] = select i1 [[TMP738]], i8 [[TMP734]], i8 [[TMP737]] +// CHECK-NEXT: store i8 [[TMP739]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP740:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP741:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP742:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP740]], i8 [[TMP741]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP743:%.*]] = extractvalue { i8, i1 } [[TMP742]], 0 +// CHECK-NEXT: [[TMP744:%.*]] = extractvalue { i8, i1 } [[TMP742]], 1 +// CHECK-NEXT: [[TMP745:%.*]] = select i1 [[TMP744]], i8 [[TMP740]], i8 [[TMP743]] +// CHECK-NEXT: store i8 [[TMP745]], i8* [[UCV]], align 1 +// CHECK-NEXT: [[TMP746:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP747:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP748:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP746]], i8 [[TMP747]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP749:%.*]] = extractvalue { i8, i1 } [[TMP748]], 0 +// CHECK-NEXT: [[TMP750:%.*]] = extractvalue { i8, i1 } [[TMP748]], 1 +// 
CHECK-NEXT: br i1 [[TMP750]], label [[UCX_ATOMIC_EXIT69:%.*]], label [[UCX_ATOMIC_CONT70:%.*]] +// CHECK: ucx.atomic.cont70: +// CHECK-NEXT: store i8 [[TMP749]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT69]] +// CHECK: ucx.atomic.exit69: +// CHECK-NEXT: [[TMP751:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP752:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP753:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP751]], i8 [[TMP752]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP754:%.*]] = extractvalue { i8, i1 } [[TMP753]], 0 +// CHECK-NEXT: [[TMP755:%.*]] = extractvalue { i8, i1 } [[TMP753]], 1 +// CHECK-NEXT: br i1 [[TMP755]], label [[UCX_ATOMIC_EXIT71:%.*]], label [[UCX_ATOMIC_CONT72:%.*]] +// CHECK: ucx.atomic.cont72: +// CHECK-NEXT: store i8 [[TMP754]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT71]] +// CHECK: ucx.atomic.exit71: +// CHECK-NEXT: [[TMP756:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP757:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP758:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP756]], i8 [[TMP757]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP759:%.*]] = extractvalue { i8, i1 } [[TMP758]], 1 +// CHECK-NEXT: [[TMP760:%.*]] = zext i1 [[TMP759]] to i8 +// CHECK-NEXT: store i8 [[TMP760]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP761:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP762:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP763:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP761]], i8 [[TMP762]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP764:%.*]] = extractvalue { i8, i1 } [[TMP763]], 1 +// CHECK-NEXT: [[TMP765:%.*]] = zext i1 [[TMP764]] to i8 +// CHECK-NEXT: store i8 [[TMP765]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP766:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP767:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP768:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP766]], i8 [[TMP767]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP769:%.*]] = extractvalue { i8, i1 } [[TMP768]], 0 +// CHECK-NEXT: [[TMP770:%.*]] = extractvalue { i8, i1 } [[TMP768]], 1 +// CHECK-NEXT: br i1 [[TMP770]], label [[UCX_ATOMIC_EXIT73:%.*]], label [[UCX_ATOMIC_CONT74:%.*]] +// CHECK: ucx.atomic.cont74: +// CHECK-NEXT: store i8 [[TMP769]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT73]] +// CHECK: ucx.atomic.exit73: +// CHECK-NEXT: [[TMP771:%.*]] = extractvalue { i8, i1 } [[TMP768]], 1 +// CHECK-NEXT: [[TMP772:%.*]] = zext i1 [[TMP771]] to i8 +// CHECK-NEXT: store i8 [[TMP772]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP773:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP774:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP775:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP773]], i8 [[TMP774]] monotonic monotonic, align 1 +// CHECK-NEXT: [[TMP776:%.*]] = extractvalue { i8, i1 } [[TMP775]], 0 +// CHECK-NEXT: [[TMP777:%.*]] = extractvalue { i8, i1 } [[TMP775]], 1 +// CHECK-NEXT: br i1 [[TMP777]], label [[UCX_ATOMIC_EXIT75:%.*]], label [[UCX_ATOMIC_CONT76:%.*]] +// CHECK: ucx.atomic.cont76: +// CHECK-NEXT: store i8 [[TMP776]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT75]] +// CHECK: ucx.atomic.exit75: +// CHECK-NEXT: [[TMP778:%.*]] = extractvalue { i8, i1 } [[TMP775]], 1 +// CHECK-NEXT: [[TMP779:%.*]] = zext i1 [[TMP778]] to i8 +// CHECK-NEXT: store i8 [[TMP779]], i8* [[UCR]], align 1 +// CHECK-NEXT: [[TMP780:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP781:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP780]] 
release, align 1 +// CHECK-NEXT: store i8 [[TMP781]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP782:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP783:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP782]] release, align 1 +// CHECK-NEXT: store i8 [[TMP783]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP784:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP785:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP784]] release, align 1 +// CHECK-NEXT: store i8 [[TMP785]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP786:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP787:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP786]] release, align 1 +// CHECK-NEXT: store i8 [[TMP787]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP788:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP789:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP790:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP788]], i8 [[TMP789]] release monotonic, align 1 +// CHECK-NEXT: [[TMP791:%.*]] = extractvalue { i8, i1 } [[TMP790]], 0 +// CHECK-NEXT: store i8 [[TMP791]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP792:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP793:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP794:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP792]], i8 [[TMP793]] release monotonic, align 1 +// CHECK-NEXT: [[TMP795:%.*]] = extractvalue { i8, i1 } [[TMP794]], 0 +// CHECK-NEXT: store i8 [[TMP795]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP796:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP797:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP796]] release, align 1 +// CHECK-NEXT: [[TMP798:%.*]] = icmp ugt i8 [[TMP797]], [[TMP796]] +// CHECK-NEXT: [[TMP799:%.*]] = select i1 [[TMP798]], i8 [[TMP796]], i8 [[TMP797]] +// CHECK-NEXT: store i8 [[TMP799]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP800:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP801:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP800]] release, align 1 +// CHECK-NEXT: [[TMP802:%.*]] = icmp ult i8 [[TMP801]], [[TMP800]] +// CHECK-NEXT: [[TMP803:%.*]] = select i1 [[TMP802]], i8 [[TMP800]], i8 [[TMP801]] +// CHECK-NEXT: store i8 [[TMP803]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP804:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP805:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP804]] release, align 1 +// CHECK-NEXT: [[TMP806:%.*]] = icmp ult i8 [[TMP805]], [[TMP804]] +// CHECK-NEXT: [[TMP807:%.*]] = select i1 [[TMP806]], i8 [[TMP804]], i8 [[TMP805]] +// CHECK-NEXT: store i8 [[TMP807]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP808:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP809:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP808]] release, align 1 +// CHECK-NEXT: [[TMP810:%.*]] = icmp ugt i8 [[TMP809]], [[TMP808]] +// CHECK-NEXT: [[TMP811:%.*]] = select i1 [[TMP810]], i8 [[TMP808]], i8 [[TMP809]] +// CHECK-NEXT: store i8 [[TMP811]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP812:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP813:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP814:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP812]], i8 [[TMP813]] release monotonic, align 1 +// CHECK-NEXT: [[TMP815:%.*]] = extractvalue { i8, i1 } [[TMP814]], 0 +// CHECK-NEXT: [[TMP816:%.*]] = extractvalue { i8, i1 } [[TMP814]], 1 +// CHECK-NEXT: [[TMP817:%.*]] = select i1 [[TMP816]], i8 [[TMP812]], i8 [[TMP815]] +// CHECK-NEXT: store i8 [[TMP817]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP818:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP819:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP820:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP818]], i8 [[TMP819]] release monotonic, align 1 +// CHECK-NEXT: [[TMP821:%.*]] = extractvalue { i8, i1 } [[TMP820]], 0 +// CHECK-NEXT: [[TMP822:%.*]] = extractvalue { i8, i1 } [[TMP820]], 1 +// CHECK-NEXT: [[TMP823:%.*]] = select i1 [[TMP822]], i8 [[TMP818]], i8 [[TMP821]] +// CHECK-NEXT: store i8 [[TMP823]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP824:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP825:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP826:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP824]], i8 [[TMP825]] release monotonic, align 1 +// CHECK-NEXT: [[TMP827:%.*]] = extractvalue { i8, i1 } [[TMP826]], 0 +// CHECK-NEXT: [[TMP828:%.*]] = extractvalue { i8, i1 } [[TMP826]], 1 +// CHECK-NEXT: br i1 [[TMP828]], label [[UCX_ATOMIC_EXIT77:%.*]], label [[UCX_ATOMIC_CONT78:%.*]] +// CHECK: ucx.atomic.cont78: +// CHECK-NEXT: store i8 [[TMP827]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT77]] +// CHECK: ucx.atomic.exit77: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP829:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP830:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP831:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP829]], i8 [[TMP830]] release monotonic, align 1 +// CHECK-NEXT: [[TMP832:%.*]] = extractvalue { i8, i1 } [[TMP831]], 0 +// CHECK-NEXT: [[TMP833:%.*]] = extractvalue { i8, i1 } [[TMP831]], 1 +// CHECK-NEXT: br i1 [[TMP833]], label [[UCX_ATOMIC_EXIT79:%.*]], label [[UCX_ATOMIC_CONT80:%.*]] +// CHECK: ucx.atomic.cont80: +// CHECK-NEXT: store i8 [[TMP832]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT79]] +// CHECK: ucx.atomic.exit79: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP834:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP835:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP836:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP834]], i8 [[TMP835]] release monotonic, align 1 +// CHECK-NEXT: [[TMP837:%.*]] = extractvalue { i8, i1 } [[TMP836]], 1 +// CHECK-NEXT: [[TMP838:%.*]] = zext i1 [[TMP837]] to i8 +// CHECK-NEXT: store i8 [[TMP838]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP839:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP840:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP841:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP839]], i8 [[TMP840]] release monotonic, align 1 +// CHECK-NEXT: [[TMP842:%.*]] = extractvalue { i8, i1 } [[TMP841]], 1 +// CHECK-NEXT: [[TMP843:%.*]] = zext i1 [[TMP842]] to i8 +// CHECK-NEXT: store i8 [[TMP843]], i8* [[UCR]], 
align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP844:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP845:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP846:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP844]], i8 [[TMP845]] release monotonic, align 1 +// CHECK-NEXT: [[TMP847:%.*]] = extractvalue { i8, i1 } [[TMP846]], 0 +// CHECK-NEXT: [[TMP848:%.*]] = extractvalue { i8, i1 } [[TMP846]], 1 +// CHECK-NEXT: br i1 [[TMP848]], label [[UCX_ATOMIC_EXIT81:%.*]], label [[UCX_ATOMIC_CONT82:%.*]] +// CHECK: ucx.atomic.cont82: +// CHECK-NEXT: store i8 [[TMP847]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT81]] +// CHECK: ucx.atomic.exit81: +// CHECK-NEXT: [[TMP849:%.*]] = extractvalue { i8, i1 } [[TMP846]], 1 +// CHECK-NEXT: [[TMP850:%.*]] = zext i1 [[TMP849]] to i8 +// CHECK-NEXT: store i8 [[TMP850]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP851:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP852:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP853:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP851]], i8 [[TMP852]] release monotonic, align 1 +// CHECK-NEXT: [[TMP854:%.*]] = extractvalue { i8, i1 } [[TMP853]], 0 +// CHECK-NEXT: [[TMP855:%.*]] = extractvalue { i8, i1 } [[TMP853]], 1 +// CHECK-NEXT: br i1 [[TMP855]], label [[UCX_ATOMIC_EXIT83:%.*]], label [[UCX_ATOMIC_CONT84:%.*]] +// CHECK: ucx.atomic.cont84: +// CHECK-NEXT: store i8 [[TMP854]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT83]] +// CHECK: ucx.atomic.exit83: +// CHECK-NEXT: [[TMP856:%.*]] = extractvalue { i8, i1 } [[TMP853]], 1 +// CHECK-NEXT: [[TMP857:%.*]] = zext i1 [[TMP856]] to i8 +// CHECK-NEXT: store i8 [[TMP857]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP858:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP859:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP858]] seq_cst, align 1 +// CHECK-NEXT: store i8 [[TMP859]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP860:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP861:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP860]] seq_cst, align 1 +// CHECK-NEXT: store i8 [[TMP861]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP862:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP863:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP862]] seq_cst, align 1 +// CHECK-NEXT: store i8 [[TMP863]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP864:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP865:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP864]] seq_cst, align 1 +// CHECK-NEXT: store i8 [[TMP865]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP866:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP867:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP868:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP866]], i8 [[TMP867]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP869:%.*]] = extractvalue { i8, i1 } [[TMP868]], 0 +// CHECK-NEXT: store i8 [[TMP869]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP870:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP871:%.*]] = load i8, i8* [[UCD]], 
align 1 +// CHECK-NEXT: [[TMP872:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP870]], i8 [[TMP871]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP873:%.*]] = extractvalue { i8, i1 } [[TMP872]], 0 +// CHECK-NEXT: store i8 [[TMP873]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP874:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP875:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP874]] seq_cst, align 1 +// CHECK-NEXT: [[TMP876:%.*]] = icmp ugt i8 [[TMP875]], [[TMP874]] +// CHECK-NEXT: [[TMP877:%.*]] = select i1 [[TMP876]], i8 [[TMP874]], i8 [[TMP875]] +// CHECK-NEXT: store i8 [[TMP877]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP878:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP879:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP878]] seq_cst, align 1 +// CHECK-NEXT: [[TMP880:%.*]] = icmp ult i8 [[TMP879]], [[TMP878]] +// CHECK-NEXT: [[TMP881:%.*]] = select i1 [[TMP880]], i8 [[TMP878]], i8 [[TMP879]] +// CHECK-NEXT: store i8 [[TMP881]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP882:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP883:%.*]] = atomicrmw umin i8* [[UCX]], i8 [[TMP882]] seq_cst, align 1 +// CHECK-NEXT: [[TMP884:%.*]] = icmp ult i8 [[TMP883]], [[TMP882]] +// CHECK-NEXT: [[TMP885:%.*]] = select i1 [[TMP884]], i8 [[TMP882]], i8 [[TMP883]] +// CHECK-NEXT: store i8 [[TMP885]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP886:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP887:%.*]] = atomicrmw umax i8* [[UCX]], i8 [[TMP886]] seq_cst, align 1 +// CHECK-NEXT: [[TMP888:%.*]] = icmp ugt i8 [[TMP887]], [[TMP886]] +// CHECK-NEXT: [[TMP889:%.*]] = select i1 [[TMP888]], i8 [[TMP886]], i8 [[TMP887]] +// CHECK-NEXT: store i8 [[TMP889]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP890:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP891:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP892:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP890]], i8 [[TMP891]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP893:%.*]] = extractvalue { i8, i1 } [[TMP892]], 0 +// CHECK-NEXT: [[TMP894:%.*]] = extractvalue { i8, i1 } [[TMP892]], 1 +// CHECK-NEXT: [[TMP895:%.*]] = select i1 [[TMP894]], i8 [[TMP890]], i8 [[TMP893]] +// CHECK-NEXT: store i8 [[TMP895]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP896:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP897:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP898:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP896]], i8 [[TMP897]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP899:%.*]] = extractvalue { i8, i1 } [[TMP898]], 0 +// CHECK-NEXT: [[TMP900:%.*]] = extractvalue { i8, i1 } [[TMP898]], 1 +// CHECK-NEXT: [[TMP901:%.*]] = select i1 [[TMP900]], i8 [[TMP896]], i8 [[TMP899]] +// CHECK-NEXT: store i8 [[TMP901]], i8* [[UCV]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP902:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP903:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP904:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP902]], i8 [[TMP903]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP905:%.*]] = extractvalue { i8, i1 } [[TMP904]], 0 +// CHECK-NEXT: 
[[TMP906:%.*]] = extractvalue { i8, i1 } [[TMP904]], 1 +// CHECK-NEXT: br i1 [[TMP906]], label [[UCX_ATOMIC_EXIT85:%.*]], label [[UCX_ATOMIC_CONT86:%.*]] +// CHECK: ucx.atomic.cont86: +// CHECK-NEXT: store i8 [[TMP905]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT85]] +// CHECK: ucx.atomic.exit85: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP907:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP908:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP909:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP907]], i8 [[TMP908]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP910:%.*]] = extractvalue { i8, i1 } [[TMP909]], 0 +// CHECK-NEXT: [[TMP911:%.*]] = extractvalue { i8, i1 } [[TMP909]], 1 +// CHECK-NEXT: br i1 [[TMP911]], label [[UCX_ATOMIC_EXIT87:%.*]], label [[UCX_ATOMIC_CONT88:%.*]] +// CHECK: ucx.atomic.cont88: +// CHECK-NEXT: store i8 [[TMP910]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT87]] +// CHECK: ucx.atomic.exit87: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP912:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP913:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP914:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP912]], i8 [[TMP913]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP915:%.*]] = extractvalue { i8, i1 } [[TMP914]], 1 +// CHECK-NEXT: [[TMP916:%.*]] = zext i1 [[TMP915]] to i8 +// CHECK-NEXT: store i8 [[TMP916]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP917:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP918:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP919:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP917]], i8 [[TMP918]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP920:%.*]] = extractvalue { i8, i1 } [[TMP919]], 1 +// CHECK-NEXT: [[TMP921:%.*]] = zext i1 [[TMP920]] to i8 +// CHECK-NEXT: store i8 [[TMP921]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP922:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP923:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP924:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP922]], i8 [[TMP923]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP925:%.*]] = extractvalue { i8, i1 } [[TMP924]], 0 +// CHECK-NEXT: [[TMP926:%.*]] = extractvalue { i8, i1 } [[TMP924]], 1 +// CHECK-NEXT: br i1 [[TMP926]], label [[UCX_ATOMIC_EXIT89:%.*]], label [[UCX_ATOMIC_CONT90:%.*]] +// CHECK: ucx.atomic.cont90: +// CHECK-NEXT: store i8 [[TMP925]], i8* [[UCV]], align 1 +// CHECK-NEXT: br label [[UCX_ATOMIC_EXIT89]] +// CHECK: ucx.atomic.exit89: +// CHECK-NEXT: [[TMP927:%.*]] = extractvalue { i8, i1 } [[TMP924]], 1 +// CHECK-NEXT: [[TMP928:%.*]] = zext i1 [[TMP927]] to i8 +// CHECK-NEXT: store i8 [[TMP928]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP929:%.*]] = load i8, i8* [[UCE]], align 1 +// CHECK-NEXT: [[TMP930:%.*]] = load i8, i8* [[UCD]], align 1 +// CHECK-NEXT: [[TMP931:%.*]] = cmpxchg i8* [[UCX]], i8 [[TMP929]], i8 [[TMP930]] seq_cst seq_cst, align 1 +// CHECK-NEXT: [[TMP932:%.*]] = extractvalue { i8, i1 } [[TMP931]], 0 +// CHECK-NEXT: [[TMP933:%.*]] = extractvalue { i8, i1 } [[TMP931]], 1 +// CHECK-NEXT: br i1 [[TMP933]], label [[UCX_ATOMIC_EXIT91:%.*]], label [[UCX_ATOMIC_CONT92:%.*]] +// CHECK: ucx.atomic.cont92: +// CHECK-NEXT: store i8 [[TMP932]], i8* [[UCV]], align 1 +// CHECK-NEXT: br 
label [[UCX_ATOMIC_EXIT91]] +// CHECK: ucx.atomic.exit91: +// CHECK-NEXT: [[TMP934:%.*]] = extractvalue { i8, i1 } [[TMP931]], 1 +// CHECK-NEXT: [[TMP935:%.*]] = zext i1 [[TMP934]] to i8 +// CHECK-NEXT: store i8 [[TMP935]], i8* [[UCR]], align 1 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP936:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP937:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP936]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP937]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP938:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP939:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP938]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP939]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP940:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP941:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP940]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP941]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP942:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP943:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP942]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP943]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP944:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP945:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP946:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP944]], i16 [[TMP945]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP947:%.*]] = extractvalue { i16, i1 } [[TMP946]], 0 +// CHECK-NEXT: store volatile i16 [[TMP947]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP948:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP949:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP950:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP948]], i16 [[TMP949]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP951:%.*]] = extractvalue { i16, i1 } [[TMP950]], 0 +// CHECK-NEXT: store volatile i16 [[TMP951]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP952:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP953:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP952]] monotonic, align 2 +// CHECK-NEXT: [[TMP954:%.*]] = icmp ugt i16 [[TMP953]], [[TMP952]] +// CHECK-NEXT: [[TMP955:%.*]] = select i1 [[TMP954]], i16 [[TMP952]], i16 [[TMP953]] +// CHECK-NEXT: store volatile i16 [[TMP955]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP956:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP957:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP956]] monotonic, align 2 +// CHECK-NEXT: [[TMP958:%.*]] = icmp ult i16 [[TMP957]], [[TMP956]] +// CHECK-NEXT: [[TMP959:%.*]] = select i1 [[TMP958]], i16 [[TMP956]], i16 [[TMP957]] +// CHECK-NEXT: store volatile i16 [[TMP959]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP960:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP961:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP960]] monotonic, align 2 +// CHECK-NEXT: [[TMP962:%.*]] = icmp ult i16 [[TMP961]], [[TMP960]] +// CHECK-NEXT: [[TMP963:%.*]] = select i1 [[TMP962]], i16 [[TMP960]], i16 [[TMP961]] +// CHECK-NEXT: store volatile i16 [[TMP963]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP964:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP965:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP964]] monotonic, align 2 +// CHECK-NEXT: [[TMP966:%.*]] = icmp ugt i16 [[TMP965]], [[TMP964]] +// CHECK-NEXT: [[TMP967:%.*]] = select i1 [[TMP966]], i16 [[TMP964]], i16 [[TMP965]] +// CHECK-NEXT: store volatile i16 [[TMP967]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP968:%.*]] = load 
i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP969:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP970:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP968]], i16 [[TMP969]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP971:%.*]] = extractvalue { i16, i1 } [[TMP970]], 0 +// CHECK-NEXT: [[TMP972:%.*]] = extractvalue { i16, i1 } [[TMP970]], 1 +// CHECK-NEXT: [[TMP973:%.*]] = select i1 [[TMP972]], i16 [[TMP968]], i16 [[TMP971]] +// CHECK-NEXT: store volatile i16 [[TMP973]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP974:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP975:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP976:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP974]], i16 [[TMP975]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP977:%.*]] = extractvalue { i16, i1 } [[TMP976]], 0 +// CHECK-NEXT: [[TMP978:%.*]] = extractvalue { i16, i1 } [[TMP976]], 1 +// CHECK-NEXT: [[TMP979:%.*]] = select i1 [[TMP978]], i16 [[TMP974]], i16 [[TMP977]] +// CHECK-NEXT: store volatile i16 [[TMP979]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP980:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP981:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP982:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP980]], i16 [[TMP981]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP983:%.*]] = extractvalue { i16, i1 } [[TMP982]], 0 +// CHECK-NEXT: [[TMP984:%.*]] = extractvalue { i16, i1 } [[TMP982]], 1 +// CHECK-NEXT: br i1 [[TMP984]], label [[SX_ATOMIC_EXIT:%.*]], label [[SX_ATOMIC_CONT:%.*]] +// CHECK: sx.atomic.cont: +// CHECK-NEXT: store i16 [[TMP983]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT]] +// CHECK: sx.atomic.exit: +// CHECK-NEXT: [[TMP985:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP986:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP987:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP985]], i16 [[TMP986]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP988:%.*]] = extractvalue { i16, i1 } [[TMP987]], 0 +// CHECK-NEXT: [[TMP989:%.*]] = extractvalue { i16, i1 } [[TMP987]], 1 +// CHECK-NEXT: br i1 [[TMP989]], label [[SX_ATOMIC_EXIT93:%.*]], label [[SX_ATOMIC_CONT94:%.*]] +// CHECK: sx.atomic.cont94: +// CHECK-NEXT: store i16 [[TMP988]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT93]] +// CHECK: sx.atomic.exit93: +// CHECK-NEXT: [[TMP990:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP991:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP992:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP990]], i16 [[TMP991]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP993:%.*]] = extractvalue { i16, i1 } [[TMP992]], 1 +// CHECK-NEXT: [[TMP994:%.*]] = zext i1 [[TMP993]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP994]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP995:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP996:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP997:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP995]], i16 [[TMP996]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP998:%.*]] = extractvalue { i16, i1 } [[TMP997]], 1 +// CHECK-NEXT: [[TMP999:%.*]] = zext i1 [[TMP998]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP999]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1000:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1001:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1002:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1000]], i16 [[TMP1001]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1003:%.*]] = extractvalue { i16, i1 } [[TMP1002]], 0 +// CHECK-NEXT: 
[[TMP1004:%.*]] = extractvalue { i16, i1 } [[TMP1002]], 1 +// CHECK-NEXT: br i1 [[TMP1004]], label [[SX_ATOMIC_EXIT95:%.*]], label [[SX_ATOMIC_CONT96:%.*]] +// CHECK: sx.atomic.cont96: +// CHECK-NEXT: store i16 [[TMP1003]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT95]] +// CHECK: sx.atomic.exit95: +// CHECK-NEXT: [[TMP1005:%.*]] = extractvalue { i16, i1 } [[TMP1002]], 1 +// CHECK-NEXT: [[TMP1006:%.*]] = zext i1 [[TMP1005]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1006]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1007:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1008:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1009:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1007]], i16 [[TMP1008]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1010:%.*]] = extractvalue { i16, i1 } [[TMP1009]], 0 +// CHECK-NEXT: [[TMP1011:%.*]] = extractvalue { i16, i1 } [[TMP1009]], 1 +// CHECK-NEXT: br i1 [[TMP1011]], label [[SX_ATOMIC_EXIT97:%.*]], label [[SX_ATOMIC_CONT98:%.*]] +// CHECK: sx.atomic.cont98: +// CHECK-NEXT: store i16 [[TMP1010]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT97]] +// CHECK: sx.atomic.exit97: +// CHECK-NEXT: [[TMP1012:%.*]] = extractvalue { i16, i1 } [[TMP1009]], 1 +// CHECK-NEXT: [[TMP1013:%.*]] = zext i1 [[TMP1012]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1013]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1014:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1015:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1014]] acq_rel, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1015]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1016:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1017:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1016]] acq_rel, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1017]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1018:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1019:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1018]] acq_rel, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1019]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1020:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1021:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1020]] acq_rel, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1021]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1022:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1023:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1024:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1022]], i16 [[TMP1023]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1025:%.*]] = extractvalue { i16, i1 } [[TMP1024]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1025]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1026:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1027:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1028:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1026]], i16 [[TMP1027]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1029:%.*]] = extractvalue { i16, i1 } [[TMP1028]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1029]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1030:%.*]] = load i16, i16* [[SE]], align 2 
+// CHECK-NEXT: [[TMP1031:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1030]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1032:%.*]] = icmp ugt i16 [[TMP1031]], [[TMP1030]] +// CHECK-NEXT: [[TMP1033:%.*]] = select i1 [[TMP1032]], i16 [[TMP1030]], i16 [[TMP1031]] +// CHECK-NEXT: store volatile i16 [[TMP1033]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1034:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1035:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1034]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1036:%.*]] = icmp ult i16 [[TMP1035]], [[TMP1034]] +// CHECK-NEXT: [[TMP1037:%.*]] = select i1 [[TMP1036]], i16 [[TMP1034]], i16 [[TMP1035]] +// CHECK-NEXT: store volatile i16 [[TMP1037]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1038:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1039:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1038]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1040:%.*]] = icmp ult i16 [[TMP1039]], [[TMP1038]] +// CHECK-NEXT: [[TMP1041:%.*]] = select i1 [[TMP1040]], i16 [[TMP1038]], i16 [[TMP1039]] +// CHECK-NEXT: store volatile i16 [[TMP1041]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1042:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1043:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1042]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1044:%.*]] = icmp ugt i16 [[TMP1043]], [[TMP1042]] +// CHECK-NEXT: [[TMP1045:%.*]] = select i1 [[TMP1044]], i16 [[TMP1042]], i16 [[TMP1043]] +// CHECK-NEXT: store volatile i16 [[TMP1045]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1046:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1047:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1048:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1046]], i16 [[TMP1047]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1049:%.*]] = extractvalue { i16, i1 } [[TMP1048]], 0 +// CHECK-NEXT: [[TMP1050:%.*]] = extractvalue { i16, i1 } [[TMP1048]], 1 +// CHECK-NEXT: [[TMP1051:%.*]] = select i1 [[TMP1050]], i16 [[TMP1046]], i16 [[TMP1049]] +// CHECK-NEXT: store volatile i16 [[TMP1051]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1052:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1053:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1054:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1052]], i16 [[TMP1053]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1055:%.*]] = extractvalue { i16, i1 } [[TMP1054]], 0 +// CHECK-NEXT: [[TMP1056:%.*]] = extractvalue { i16, i1 } [[TMP1054]], 1 +// CHECK-NEXT: [[TMP1057:%.*]] = select i1 [[TMP1056]], i16 [[TMP1052]], i16 [[TMP1055]] +// CHECK-NEXT: store volatile i16 [[TMP1057]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1058:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1059:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1060:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1058]], i16 [[TMP1059]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1061:%.*]] = extractvalue { i16, i1 } [[TMP1060]], 0 +// CHECK-NEXT: [[TMP1062:%.*]] = extractvalue { i16, i1 } [[TMP1060]], 1 +// CHECK-NEXT: br i1 [[TMP1062]], label [[SX_ATOMIC_EXIT99:%.*]], label [[SX_ATOMIC_CONT100:%.*]] +// CHECK: sx.atomic.cont100: +// CHECK-NEXT: store 
i16 [[TMP1061]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT99]] +// CHECK: sx.atomic.exit99: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1063:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1064:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1065:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1063]], i16 [[TMP1064]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1066:%.*]] = extractvalue { i16, i1 } [[TMP1065]], 0 +// CHECK-NEXT: [[TMP1067:%.*]] = extractvalue { i16, i1 } [[TMP1065]], 1 +// CHECK-NEXT: br i1 [[TMP1067]], label [[SX_ATOMIC_EXIT101:%.*]], label [[SX_ATOMIC_CONT102:%.*]] +// CHECK: sx.atomic.cont102: +// CHECK-NEXT: store i16 [[TMP1066]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT101]] +// CHECK: sx.atomic.exit101: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1068:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1069:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1070:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1068]], i16 [[TMP1069]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1071:%.*]] = extractvalue { i16, i1 } [[TMP1070]], 1 +// CHECK-NEXT: [[TMP1072:%.*]] = zext i1 [[TMP1071]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1072]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1073:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1074:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1075:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1073]], i16 [[TMP1074]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1076:%.*]] = extractvalue { i16, i1 } [[TMP1075]], 1 +// CHECK-NEXT: [[TMP1077:%.*]] = zext i1 [[TMP1076]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1077]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1078:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1079:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1080:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1078]], i16 [[TMP1079]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1081:%.*]] = extractvalue { i16, i1 } [[TMP1080]], 0 +// CHECK-NEXT: [[TMP1082:%.*]] = extractvalue { i16, i1 } [[TMP1080]], 1 +// CHECK-NEXT: br i1 [[TMP1082]], label [[SX_ATOMIC_EXIT103:%.*]], label [[SX_ATOMIC_CONT104:%.*]] +// CHECK: sx.atomic.cont104: +// CHECK-NEXT: store i16 [[TMP1081]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT103]] +// CHECK: sx.atomic.exit103: +// CHECK-NEXT: [[TMP1083:%.*]] = extractvalue { i16, i1 } [[TMP1080]], 1 +// CHECK-NEXT: [[TMP1084:%.*]] = zext i1 [[TMP1083]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1084]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1085:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1086:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1087:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1085]], i16 [[TMP1086]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1088:%.*]] = extractvalue { i16, i1 } [[TMP1087]], 0 +// CHECK-NEXT: [[TMP1089:%.*]] = extractvalue { i16, i1 } [[TMP1087]], 1 +// CHECK-NEXT: br i1 [[TMP1089]], label [[SX_ATOMIC_EXIT105:%.*]], label [[SX_ATOMIC_CONT106:%.*]] +// CHECK: sx.atomic.cont106: +// CHECK-NEXT: store i16 [[TMP1088]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT105]] +// CHECK: sx.atomic.exit105: +// CHECK-NEXT: 
[[TMP1090:%.*]] = extractvalue { i16, i1 } [[TMP1087]], 1 +// CHECK-NEXT: [[TMP1091:%.*]] = zext i1 [[TMP1090]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1091]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1092:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1093:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1092]] acquire, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1093]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1094:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1095:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1094]] acquire, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1095]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1096:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1097:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1096]] acquire, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1097]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1098:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1099:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1098]] acquire, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1099]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1100:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1101:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1102:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1100]], i16 [[TMP1101]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1103:%.*]] = extractvalue { i16, i1 } [[TMP1102]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1103]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1104:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1105:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1106:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1104]], i16 [[TMP1105]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1107:%.*]] = extractvalue { i16, i1 } [[TMP1106]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1107]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1108:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1109:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1108]] acquire, align 2 +// CHECK-NEXT: [[TMP1110:%.*]] = icmp ugt i16 [[TMP1109]], [[TMP1108]] +// CHECK-NEXT: [[TMP1111:%.*]] = select i1 [[TMP1110]], i16 [[TMP1108]], i16 [[TMP1109]] +// CHECK-NEXT: store volatile i16 [[TMP1111]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1112:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1113:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1112]] acquire, align 2 +// CHECK-NEXT: [[TMP1114:%.*]] = icmp ult i16 [[TMP1113]], [[TMP1112]] +// CHECK-NEXT: [[TMP1115:%.*]] = select i1 [[TMP1114]], i16 [[TMP1112]], i16 [[TMP1113]] +// CHECK-NEXT: store volatile i16 [[TMP1115]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1116:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1117:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1116]] acquire, align 2 +// CHECK-NEXT: [[TMP1118:%.*]] = icmp ult i16 [[TMP1117]], [[TMP1116]] +// CHECK-NEXT: [[TMP1119:%.*]] = select i1 [[TMP1118]], i16 [[TMP1116]], i16 [[TMP1117]] +// CHECK-NEXT: store volatile i16 [[TMP1119]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1120:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1121:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1120]] acquire, align 2 +// CHECK-NEXT: [[TMP1122:%.*]] = icmp ugt i16 [[TMP1121]], [[TMP1120]] +// CHECK-NEXT: [[TMP1123:%.*]] = select i1 [[TMP1122]], i16 [[TMP1120]], i16 [[TMP1121]] +// CHECK-NEXT: store volatile i16 [[TMP1123]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1124:%.*]] = load i16, 
i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1125:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1126:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1124]], i16 [[TMP1125]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1127:%.*]] = extractvalue { i16, i1 } [[TMP1126]], 0 +// CHECK-NEXT: [[TMP1128:%.*]] = extractvalue { i16, i1 } [[TMP1126]], 1 +// CHECK-NEXT: [[TMP1129:%.*]] = select i1 [[TMP1128]], i16 [[TMP1124]], i16 [[TMP1127]] +// CHECK-NEXT: store volatile i16 [[TMP1129]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1130:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1131:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1132:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1130]], i16 [[TMP1131]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1133:%.*]] = extractvalue { i16, i1 } [[TMP1132]], 0 +// CHECK-NEXT: [[TMP1134:%.*]] = extractvalue { i16, i1 } [[TMP1132]], 1 +// CHECK-NEXT: [[TMP1135:%.*]] = select i1 [[TMP1134]], i16 [[TMP1130]], i16 [[TMP1133]] +// CHECK-NEXT: store volatile i16 [[TMP1135]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1136:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1137:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1138:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1136]], i16 [[TMP1137]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1139:%.*]] = extractvalue { i16, i1 } [[TMP1138]], 0 +// CHECK-NEXT: [[TMP1140:%.*]] = extractvalue { i16, i1 } [[TMP1138]], 1 +// CHECK-NEXT: br i1 [[TMP1140]], label [[SX_ATOMIC_EXIT107:%.*]], label [[SX_ATOMIC_CONT108:%.*]] +// CHECK: sx.atomic.cont108: +// CHECK-NEXT: store i16 [[TMP1139]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT107]] +// CHECK: sx.atomic.exit107: +// CHECK-NEXT: [[TMP1141:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1142:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1143:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1141]], i16 [[TMP1142]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1144:%.*]] = extractvalue { i16, i1 } [[TMP1143]], 0 +// CHECK-NEXT: [[TMP1145:%.*]] = extractvalue { i16, i1 } [[TMP1143]], 1 +// CHECK-NEXT: br i1 [[TMP1145]], label [[SX_ATOMIC_EXIT109:%.*]], label [[SX_ATOMIC_CONT110:%.*]] +// CHECK: sx.atomic.cont110: +// CHECK-NEXT: store i16 [[TMP1144]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT109]] +// CHECK: sx.atomic.exit109: +// CHECK-NEXT: [[TMP1146:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1147:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1148:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1146]], i16 [[TMP1147]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1149:%.*]] = extractvalue { i16, i1 } [[TMP1148]], 1 +// CHECK-NEXT: [[TMP1150:%.*]] = zext i1 [[TMP1149]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1150]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1151:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1152:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1153:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1151]], i16 [[TMP1152]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1154:%.*]] = extractvalue { i16, i1 } [[TMP1153]], 1 +// CHECK-NEXT: [[TMP1155:%.*]] = zext i1 [[TMP1154]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1155]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1156:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1157:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1158:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1156]], i16 [[TMP1157]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1159:%.*]] = 
extractvalue { i16, i1 } [[TMP1158]], 0 +// CHECK-NEXT: [[TMP1160:%.*]] = extractvalue { i16, i1 } [[TMP1158]], 1 +// CHECK-NEXT: br i1 [[TMP1160]], label [[SX_ATOMIC_EXIT111:%.*]], label [[SX_ATOMIC_CONT112:%.*]] +// CHECK: sx.atomic.cont112: +// CHECK-NEXT: store i16 [[TMP1159]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT111]] +// CHECK: sx.atomic.exit111: +// CHECK-NEXT: [[TMP1161:%.*]] = extractvalue { i16, i1 } [[TMP1158]], 1 +// CHECK-NEXT: [[TMP1162:%.*]] = zext i1 [[TMP1161]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1162]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1163:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1164:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1165:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1163]], i16 [[TMP1164]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1166:%.*]] = extractvalue { i16, i1 } [[TMP1165]], 0 +// CHECK-NEXT: [[TMP1167:%.*]] = extractvalue { i16, i1 } [[TMP1165]], 1 +// CHECK-NEXT: br i1 [[TMP1167]], label [[SX_ATOMIC_EXIT113:%.*]], label [[SX_ATOMIC_CONT114:%.*]] +// CHECK: sx.atomic.cont114: +// CHECK-NEXT: store i16 [[TMP1166]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT113]] +// CHECK: sx.atomic.exit113: +// CHECK-NEXT: [[TMP1168:%.*]] = extractvalue { i16, i1 } [[TMP1165]], 1 +// CHECK-NEXT: [[TMP1169:%.*]] = zext i1 [[TMP1168]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1169]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1170:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1171:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1170]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1171]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1172:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1173:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1172]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1173]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1174:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1175:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1174]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1175]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1176:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1177:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1176]] monotonic, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1177]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1178:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1179:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1180:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1178]], i16 [[TMP1179]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1181:%.*]] = extractvalue { i16, i1 } [[TMP1180]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1181]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1182:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1183:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1184:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1182]], i16 [[TMP1183]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1185:%.*]] = extractvalue { i16, i1 } [[TMP1184]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1185]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1186:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1187:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1186]] monotonic, align 2 +// CHECK-NEXT: [[TMP1188:%.*]] = icmp ugt i16 [[TMP1187]], [[TMP1186]] +// CHECK-NEXT: [[TMP1189:%.*]] = select i1 [[TMP1188]], i16 [[TMP1186]], i16 [[TMP1187]] +// CHECK-NEXT: store volatile i16 [[TMP1189]], i16* [[SV]], align 2 +// 
CHECK-NEXT: [[TMP1190:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1191:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1190]] monotonic, align 2 +// CHECK-NEXT: [[TMP1192:%.*]] = icmp ult i16 [[TMP1191]], [[TMP1190]] +// CHECK-NEXT: [[TMP1193:%.*]] = select i1 [[TMP1192]], i16 [[TMP1190]], i16 [[TMP1191]] +// CHECK-NEXT: store volatile i16 [[TMP1193]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1194:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1195:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1194]] monotonic, align 2 +// CHECK-NEXT: [[TMP1196:%.*]] = icmp ult i16 [[TMP1195]], [[TMP1194]] +// CHECK-NEXT: [[TMP1197:%.*]] = select i1 [[TMP1196]], i16 [[TMP1194]], i16 [[TMP1195]] +// CHECK-NEXT: store volatile i16 [[TMP1197]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1198:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1199:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1198]] monotonic, align 2 +// CHECK-NEXT: [[TMP1200:%.*]] = icmp ugt i16 [[TMP1199]], [[TMP1198]] +// CHECK-NEXT: [[TMP1201:%.*]] = select i1 [[TMP1200]], i16 [[TMP1198]], i16 [[TMP1199]] +// CHECK-NEXT: store volatile i16 [[TMP1201]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1202:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1203:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1204:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1202]], i16 [[TMP1203]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1205:%.*]] = extractvalue { i16, i1 } [[TMP1204]], 0 +// CHECK-NEXT: [[TMP1206:%.*]] = extractvalue { i16, i1 } [[TMP1204]], 1 +// CHECK-NEXT: [[TMP1207:%.*]] = select i1 [[TMP1206]], i16 [[TMP1202]], i16 [[TMP1205]] +// CHECK-NEXT: store volatile i16 [[TMP1207]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1208:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1209:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1210:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1208]], i16 [[TMP1209]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1211:%.*]] = extractvalue { i16, i1 } [[TMP1210]], 0 +// CHECK-NEXT: [[TMP1212:%.*]] = extractvalue { i16, i1 } [[TMP1210]], 1 +// CHECK-NEXT: [[TMP1213:%.*]] = select i1 [[TMP1212]], i16 [[TMP1208]], i16 [[TMP1211]] +// CHECK-NEXT: store volatile i16 [[TMP1213]], i16* [[SV]], align 2 +// CHECK-NEXT: [[TMP1214:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1215:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1216:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1214]], i16 [[TMP1215]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1217:%.*]] = extractvalue { i16, i1 } [[TMP1216]], 0 +// CHECK-NEXT: [[TMP1218:%.*]] = extractvalue { i16, i1 } [[TMP1216]], 1 +// CHECK-NEXT: br i1 [[TMP1218]], label [[SX_ATOMIC_EXIT115:%.*]], label [[SX_ATOMIC_CONT116:%.*]] +// CHECK: sx.atomic.cont116: +// CHECK-NEXT: store i16 [[TMP1217]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT115]] +// CHECK: sx.atomic.exit115: +// CHECK-NEXT: [[TMP1219:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1220:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1221:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1219]], i16 [[TMP1220]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1222:%.*]] = extractvalue { i16, i1 } [[TMP1221]], 0 +// CHECK-NEXT: [[TMP1223:%.*]] = extractvalue { i16, i1 } [[TMP1221]], 1 +// CHECK-NEXT: br i1 [[TMP1223]], label [[SX_ATOMIC_EXIT117:%.*]], label [[SX_ATOMIC_CONT118:%.*]] +// CHECK: sx.atomic.cont118: +// CHECK-NEXT: store i16 [[TMP1222]], i16* [[SV]], align 2 +// CHECK-NEXT: br label 
[[SX_ATOMIC_EXIT117]] +// CHECK: sx.atomic.exit117: +// CHECK-NEXT: [[TMP1224:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1225:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1226:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1224]], i16 [[TMP1225]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1227:%.*]] = extractvalue { i16, i1 } [[TMP1226]], 1 +// CHECK-NEXT: [[TMP1228:%.*]] = zext i1 [[TMP1227]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1228]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1229:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1230:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1231:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1229]], i16 [[TMP1230]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1232:%.*]] = extractvalue { i16, i1 } [[TMP1231]], 1 +// CHECK-NEXT: [[TMP1233:%.*]] = zext i1 [[TMP1232]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1233]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1234:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1235:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1236:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1234]], i16 [[TMP1235]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1237:%.*]] = extractvalue { i16, i1 } [[TMP1236]], 0 +// CHECK-NEXT: [[TMP1238:%.*]] = extractvalue { i16, i1 } [[TMP1236]], 1 +// CHECK-NEXT: br i1 [[TMP1238]], label [[SX_ATOMIC_EXIT119:%.*]], label [[SX_ATOMIC_CONT120:%.*]] +// CHECK: sx.atomic.cont120: +// CHECK-NEXT: store i16 [[TMP1237]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT119]] +// CHECK: sx.atomic.exit119: +// CHECK-NEXT: [[TMP1239:%.*]] = extractvalue { i16, i1 } [[TMP1236]], 1 +// CHECK-NEXT: [[TMP1240:%.*]] = zext i1 [[TMP1239]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1240]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1241:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1242:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1243:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1241]], i16 [[TMP1242]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1244:%.*]] = extractvalue { i16, i1 } [[TMP1243]], 0 +// CHECK-NEXT: [[TMP1245:%.*]] = extractvalue { i16, i1 } [[TMP1243]], 1 +// CHECK-NEXT: br i1 [[TMP1245]], label [[SX_ATOMIC_EXIT121:%.*]], label [[SX_ATOMIC_CONT122:%.*]] +// CHECK: sx.atomic.cont122: +// CHECK-NEXT: store i16 [[TMP1244]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT121]] +// CHECK: sx.atomic.exit121: +// CHECK-NEXT: [[TMP1246:%.*]] = extractvalue { i16, i1 } [[TMP1243]], 1 +// CHECK-NEXT: [[TMP1247:%.*]] = zext i1 [[TMP1246]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1247]], i16* [[SR]], align 2 +// CHECK-NEXT: [[TMP1248:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1249:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1248]] release, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1249]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1250:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1251:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1250]] release, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1251]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1252:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1253:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1252]] release, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1253]], i16* [[SV]], align 2 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1254:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1255:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1254]] release, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1255]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1256:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1257:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1258:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1256]], i16 [[TMP1257]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1259:%.*]] = extractvalue { i16, i1 } [[TMP1258]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1259]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1260:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1261:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1262:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1260]], i16 [[TMP1261]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1263:%.*]] = extractvalue { i16, i1 } [[TMP1262]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1263]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1264:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1265:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1264]] release, align 2 +// CHECK-NEXT: [[TMP1266:%.*]] = icmp ugt i16 [[TMP1265]], [[TMP1264]] +// CHECK-NEXT: [[TMP1267:%.*]] = select i1 [[TMP1266]], i16 [[TMP1264]], i16 [[TMP1265]] +// CHECK-NEXT: store volatile i16 [[TMP1267]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1268:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1269:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1268]] release, align 2 +// CHECK-NEXT: [[TMP1270:%.*]] = icmp ult i16 [[TMP1269]], [[TMP1268]] +// CHECK-NEXT: [[TMP1271:%.*]] = select i1 [[TMP1270]], i16 [[TMP1268]], i16 [[TMP1269]] +// CHECK-NEXT: store volatile i16 [[TMP1271]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1272:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1273:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1272]] release, align 2 +// CHECK-NEXT: [[TMP1274:%.*]] = icmp ult i16 [[TMP1273]], [[TMP1272]] +// CHECK-NEXT: [[TMP1275:%.*]] = select i1 [[TMP1274]], i16 [[TMP1272]], i16 [[TMP1273]] +// CHECK-NEXT: store volatile i16 [[TMP1275]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1276:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1277:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1276]] release, align 2 +// CHECK-NEXT: [[TMP1278:%.*]] = icmp ugt i16 [[TMP1277]], [[TMP1276]] +// CHECK-NEXT: [[TMP1279:%.*]] = select i1 [[TMP1278]], i16 [[TMP1276]], i16 [[TMP1277]] +// CHECK-NEXT: store volatile i16 [[TMP1279]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1280:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1281:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1282:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1280]], i16 [[TMP1281]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1283:%.*]] = extractvalue { i16, i1 } [[TMP1282]], 0 +// CHECK-NEXT: [[TMP1284:%.*]] = extractvalue { i16, i1 } [[TMP1282]], 1 +// CHECK-NEXT: [[TMP1285:%.*]] = select i1 [[TMP1284]], i16 
[[TMP1280]], i16 [[TMP1283]] +// CHECK-NEXT: store volatile i16 [[TMP1285]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1286:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1287:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1288:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1286]], i16 [[TMP1287]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1289:%.*]] = extractvalue { i16, i1 } [[TMP1288]], 0 +// CHECK-NEXT: [[TMP1290:%.*]] = extractvalue { i16, i1 } [[TMP1288]], 1 +// CHECK-NEXT: [[TMP1291:%.*]] = select i1 [[TMP1290]], i16 [[TMP1286]], i16 [[TMP1289]] +// CHECK-NEXT: store volatile i16 [[TMP1291]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1292:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1293:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1294:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1292]], i16 [[TMP1293]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1295:%.*]] = extractvalue { i16, i1 } [[TMP1294]], 0 +// CHECK-NEXT: [[TMP1296:%.*]] = extractvalue { i16, i1 } [[TMP1294]], 1 +// CHECK-NEXT: br i1 [[TMP1296]], label [[SX_ATOMIC_EXIT123:%.*]], label [[SX_ATOMIC_CONT124:%.*]] +// CHECK: sx.atomic.cont124: +// CHECK-NEXT: store i16 [[TMP1295]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT123]] +// CHECK: sx.atomic.exit123: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1297:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1298:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1299:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1297]], i16 [[TMP1298]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1300:%.*]] = extractvalue { i16, i1 } [[TMP1299]], 0 +// CHECK-NEXT: [[TMP1301:%.*]] = extractvalue { i16, i1 } [[TMP1299]], 1 +// CHECK-NEXT: br i1 [[TMP1301]], label [[SX_ATOMIC_EXIT125:%.*]], label [[SX_ATOMIC_CONT126:%.*]] +// CHECK: sx.atomic.cont126: +// CHECK-NEXT: store i16 [[TMP1300]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT125]] +// CHECK: sx.atomic.exit125: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1302:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1303:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1304:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1302]], i16 [[TMP1303]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1305:%.*]] = extractvalue { i16, i1 } [[TMP1304]], 1 +// CHECK-NEXT: [[TMP1306:%.*]] = zext i1 [[TMP1305]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1306]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1307:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1308:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1309:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1307]], i16 [[TMP1308]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1310:%.*]] = extractvalue { i16, i1 } [[TMP1309]], 1 +// CHECK-NEXT: [[TMP1311:%.*]] = zext i1 [[TMP1310]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1311]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1312:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1313:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1314:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1312]], i16 [[TMP1313]] release monotonic, align 2 +// CHECK-NEXT: 
[[TMP1315:%.*]] = extractvalue { i16, i1 } [[TMP1314]], 0 +// CHECK-NEXT: [[TMP1316:%.*]] = extractvalue { i16, i1 } [[TMP1314]], 1 +// CHECK-NEXT: br i1 [[TMP1316]], label [[SX_ATOMIC_EXIT127:%.*]], label [[SX_ATOMIC_CONT128:%.*]] +// CHECK: sx.atomic.cont128: +// CHECK-NEXT: store i16 [[TMP1315]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT127]] +// CHECK: sx.atomic.exit127: +// CHECK-NEXT: [[TMP1317:%.*]] = extractvalue { i16, i1 } [[TMP1314]], 1 +// CHECK-NEXT: [[TMP1318:%.*]] = zext i1 [[TMP1317]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1318]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1319:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1320:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1321:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1319]], i16 [[TMP1320]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1322:%.*]] = extractvalue { i16, i1 } [[TMP1321]], 0 +// CHECK-NEXT: [[TMP1323:%.*]] = extractvalue { i16, i1 } [[TMP1321]], 1 +// CHECK-NEXT: br i1 [[TMP1323]], label [[SX_ATOMIC_EXIT129:%.*]], label [[SX_ATOMIC_CONT130:%.*]] +// CHECK: sx.atomic.cont130: +// CHECK-NEXT: store i16 [[TMP1322]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT129]] +// CHECK: sx.atomic.exit129: +// CHECK-NEXT: [[TMP1324:%.*]] = extractvalue { i16, i1 } [[TMP1321]], 1 +// CHECK-NEXT: [[TMP1325:%.*]] = zext i1 [[TMP1324]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1325]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1326:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1327:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1326]] seq_cst, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1327]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1328:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1329:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1328]] seq_cst, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1329]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1330:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1331:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1330]] seq_cst, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1331]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1332:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1333:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1332]] seq_cst, align 2 +// CHECK-NEXT: store volatile i16 [[TMP1333]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1334:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1335:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1336:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1334]], i16 [[TMP1335]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1337:%.*]] = extractvalue { i16, i1 } [[TMP1336]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1337]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1338:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1339:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1340:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1338]], i16 [[TMP1339]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1341:%.*]] = extractvalue { i16, i1 } 
[[TMP1340]], 0 +// CHECK-NEXT: store volatile i16 [[TMP1341]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1342:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1343:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1342]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1344:%.*]] = icmp ugt i16 [[TMP1343]], [[TMP1342]] +// CHECK-NEXT: [[TMP1345:%.*]] = select i1 [[TMP1344]], i16 [[TMP1342]], i16 [[TMP1343]] +// CHECK-NEXT: store volatile i16 [[TMP1345]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1346:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1347:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1346]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1348:%.*]] = icmp ult i16 [[TMP1347]], [[TMP1346]] +// CHECK-NEXT: [[TMP1349:%.*]] = select i1 [[TMP1348]], i16 [[TMP1346]], i16 [[TMP1347]] +// CHECK-NEXT: store volatile i16 [[TMP1349]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1350:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1351:%.*]] = atomicrmw umin i16* [[SX]], i16 [[TMP1350]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1352:%.*]] = icmp ult i16 [[TMP1351]], [[TMP1350]] +// CHECK-NEXT: [[TMP1353:%.*]] = select i1 [[TMP1352]], i16 [[TMP1350]], i16 [[TMP1351]] +// CHECK-NEXT: store volatile i16 [[TMP1353]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1354:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1355:%.*]] = atomicrmw umax i16* [[SX]], i16 [[TMP1354]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1356:%.*]] = icmp ugt i16 [[TMP1355]], [[TMP1354]] +// CHECK-NEXT: [[TMP1357:%.*]] = select i1 [[TMP1356]], i16 [[TMP1354]], i16 [[TMP1355]] +// CHECK-NEXT: store volatile i16 [[TMP1357]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1358:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1359:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1360:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1358]], i16 [[TMP1359]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1361:%.*]] = extractvalue { i16, i1 } [[TMP1360]], 0 +// CHECK-NEXT: [[TMP1362:%.*]] = extractvalue { i16, i1 } [[TMP1360]], 1 +// CHECK-NEXT: [[TMP1363:%.*]] = select i1 [[TMP1362]], i16 [[TMP1358]], i16 [[TMP1361]] +// CHECK-NEXT: store volatile i16 [[TMP1363]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1364:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1365:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1366:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1364]], i16 [[TMP1365]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1367:%.*]] = extractvalue { i16, i1 } [[TMP1366]], 0 +// CHECK-NEXT: [[TMP1368:%.*]] = extractvalue { i16, i1 } [[TMP1366]], 1 +// CHECK-NEXT: [[TMP1369:%.*]] = select i1 [[TMP1368]], i16 [[TMP1364]], i16 [[TMP1367]] +// CHECK-NEXT: store volatile i16 [[TMP1369]], i16* [[SV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1370:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1371:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1372:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1370]], i16 [[TMP1371]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1373:%.*]] = extractvalue { i16, i1 } [[TMP1372]], 0 +// 
CHECK-NEXT: [[TMP1374:%.*]] = extractvalue { i16, i1 } [[TMP1372]], 1 +// CHECK-NEXT: br i1 [[TMP1374]], label [[SX_ATOMIC_EXIT131:%.*]], label [[SX_ATOMIC_CONT132:%.*]] +// CHECK: sx.atomic.cont132: +// CHECK-NEXT: store i16 [[TMP1373]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT131]] +// CHECK: sx.atomic.exit131: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1375:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1376:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1377:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1375]], i16 [[TMP1376]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1378:%.*]] = extractvalue { i16, i1 } [[TMP1377]], 0 +// CHECK-NEXT: [[TMP1379:%.*]] = extractvalue { i16, i1 } [[TMP1377]], 1 +// CHECK-NEXT: br i1 [[TMP1379]], label [[SX_ATOMIC_EXIT133:%.*]], label [[SX_ATOMIC_CONT134:%.*]] +// CHECK: sx.atomic.cont134: +// CHECK-NEXT: store i16 [[TMP1378]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT133]] +// CHECK: sx.atomic.exit133: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1380:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1381:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1382:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1380]], i16 [[TMP1381]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1383:%.*]] = extractvalue { i16, i1 } [[TMP1382]], 1 +// CHECK-NEXT: [[TMP1384:%.*]] = zext i1 [[TMP1383]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1384]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1385:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1386:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1387:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1385]], i16 [[TMP1386]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1388:%.*]] = extractvalue { i16, i1 } [[TMP1387]], 1 +// CHECK-NEXT: [[TMP1389:%.*]] = zext i1 [[TMP1388]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1389]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1390:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1391:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1392:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1390]], i16 [[TMP1391]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1393:%.*]] = extractvalue { i16, i1 } [[TMP1392]], 0 +// CHECK-NEXT: [[TMP1394:%.*]] = extractvalue { i16, i1 } [[TMP1392]], 1 +// CHECK-NEXT: br i1 [[TMP1394]], label [[SX_ATOMIC_EXIT135:%.*]], label [[SX_ATOMIC_CONT136:%.*]] +// CHECK: sx.atomic.cont136: +// CHECK-NEXT: store i16 [[TMP1393]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT135]] +// CHECK: sx.atomic.exit135: +// CHECK-NEXT: [[TMP1395:%.*]] = extractvalue { i16, i1 } [[TMP1392]], 1 +// CHECK-NEXT: [[TMP1396:%.*]] = zext i1 [[TMP1395]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1396]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1397:%.*]] = load i16, i16* [[SE]], align 2 +// CHECK-NEXT: [[TMP1398:%.*]] = load i16, i16* [[SD]], align 2 +// CHECK-NEXT: [[TMP1399:%.*]] = cmpxchg i16* [[SX]], i16 [[TMP1397]], i16 [[TMP1398]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1400:%.*]] = extractvalue { i16, i1 } [[TMP1399]], 0 +// CHECK-NEXT: [[TMP1401:%.*]] = extractvalue { i16, i1 } [[TMP1399]], 1 +// CHECK-NEXT: br i1 [[TMP1401]], label [[SX_ATOMIC_EXIT137:%.*]], 
label [[SX_ATOMIC_CONT138:%.*]] +// CHECK: sx.atomic.cont138: +// CHECK-NEXT: store i16 [[TMP1400]], i16* [[SV]], align 2 +// CHECK-NEXT: br label [[SX_ATOMIC_EXIT137]] +// CHECK: sx.atomic.exit137: +// CHECK-NEXT: [[TMP1402:%.*]] = extractvalue { i16, i1 } [[TMP1399]], 1 +// CHECK-NEXT: [[TMP1403:%.*]] = zext i1 [[TMP1402]] to i16 +// CHECK-NEXT: store volatile i16 [[TMP1403]], i16* [[SR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1404:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1405:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1404]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1405]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1406:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1407:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1406]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1407]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1408:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1409:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1408]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1409]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1410:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1411:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1410]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1411]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1412:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1413:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1414:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1412]], i16 [[TMP1413]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1415:%.*]] = extractvalue { i16, i1 } [[TMP1414]], 0 +// CHECK-NEXT: store i16 [[TMP1415]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1416:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1417:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1418:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1416]], i16 [[TMP1417]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1419:%.*]] = extractvalue { i16, i1 } [[TMP1418]], 0 +// CHECK-NEXT: store i16 [[TMP1419]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1420:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1421:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1420]] monotonic, align 2 +// CHECK-NEXT: [[TMP1422:%.*]] = icmp ugt i16 [[TMP1421]], [[TMP1420]] +// CHECK-NEXT: [[TMP1423:%.*]] = select i1 [[TMP1422]], i16 [[TMP1420]], i16 [[TMP1421]] +// CHECK-NEXT: store i16 [[TMP1423]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1424:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1425:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1424]] monotonic, align 2 +// CHECK-NEXT: [[TMP1426:%.*]] = icmp ult i16 [[TMP1425]], [[TMP1424]] +// CHECK-NEXT: [[TMP1427:%.*]] = select i1 [[TMP1426]], i16 [[TMP1424]], i16 [[TMP1425]] +// CHECK-NEXT: store i16 [[TMP1427]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1428:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1429:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1428]] monotonic, align 2 +// CHECK-NEXT: [[TMP1430:%.*]] = icmp ult i16 [[TMP1429]], [[TMP1428]] +// CHECK-NEXT: [[TMP1431:%.*]] = select i1 [[TMP1430]], i16 [[TMP1428]], i16 [[TMP1429]] +// CHECK-NEXT: store i16 [[TMP1431]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1432:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1433:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1432]] monotonic, align 2 +// CHECK-NEXT: [[TMP1434:%.*]] = icmp ugt i16 [[TMP1433]], [[TMP1432]] +// CHECK-NEXT: 
[[TMP1435:%.*]] = select i1 [[TMP1434]], i16 [[TMP1432]], i16 [[TMP1433]] +// CHECK-NEXT: store i16 [[TMP1435]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1436:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1437:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1438:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1436]], i16 [[TMP1437]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1439:%.*]] = extractvalue { i16, i1 } [[TMP1438]], 0 +// CHECK-NEXT: [[TMP1440:%.*]] = extractvalue { i16, i1 } [[TMP1438]], 1 +// CHECK-NEXT: [[TMP1441:%.*]] = select i1 [[TMP1440]], i16 [[TMP1436]], i16 [[TMP1439]] +// CHECK-NEXT: store i16 [[TMP1441]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1442:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1443:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1444:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1442]], i16 [[TMP1443]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1445:%.*]] = extractvalue { i16, i1 } [[TMP1444]], 0 +// CHECK-NEXT: [[TMP1446:%.*]] = extractvalue { i16, i1 } [[TMP1444]], 1 +// CHECK-NEXT: [[TMP1447:%.*]] = select i1 [[TMP1446]], i16 [[TMP1442]], i16 [[TMP1445]] +// CHECK-NEXT: store i16 [[TMP1447]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1448:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1449:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1450:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1448]], i16 [[TMP1449]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1451:%.*]] = extractvalue { i16, i1 } [[TMP1450]], 0 +// CHECK-NEXT: [[TMP1452:%.*]] = extractvalue { i16, i1 } [[TMP1450]], 1 +// CHECK-NEXT: br i1 [[TMP1452]], label [[USX_ATOMIC_EXIT:%.*]], label [[USX_ATOMIC_CONT:%.*]] +// CHECK: usx.atomic.cont: +// CHECK-NEXT: store i16 [[TMP1451]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT]] +// CHECK: usx.atomic.exit: +// CHECK-NEXT: [[TMP1453:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1454:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1455:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1453]], i16 [[TMP1454]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1456:%.*]] = extractvalue { i16, i1 } [[TMP1455]], 0 +// CHECK-NEXT: [[TMP1457:%.*]] = extractvalue { i16, i1 } [[TMP1455]], 1 +// CHECK-NEXT: br i1 [[TMP1457]], label [[USX_ATOMIC_EXIT139:%.*]], label [[USX_ATOMIC_CONT140:%.*]] +// CHECK: usx.atomic.cont140: +// CHECK-NEXT: store i16 [[TMP1456]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT139]] +// CHECK: usx.atomic.exit139: +// CHECK-NEXT: [[TMP1458:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1459:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1460:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1458]], i16 [[TMP1459]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1461:%.*]] = extractvalue { i16, i1 } [[TMP1460]], 1 +// CHECK-NEXT: [[TMP1462:%.*]] = zext i1 [[TMP1461]] to i16 +// CHECK-NEXT: store i16 [[TMP1462]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1463:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1464:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1465:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1463]], i16 [[TMP1464]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1466:%.*]] = extractvalue { i16, i1 } [[TMP1465]], 1 +// CHECK-NEXT: [[TMP1467:%.*]] = zext i1 [[TMP1466]] to i16 +// CHECK-NEXT: store i16 [[TMP1467]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1468:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1469:%.*]] = 
load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1470:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1468]], i16 [[TMP1469]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1471:%.*]] = extractvalue { i16, i1 } [[TMP1470]], 0 +// CHECK-NEXT: [[TMP1472:%.*]] = extractvalue { i16, i1 } [[TMP1470]], 1 +// CHECK-NEXT: br i1 [[TMP1472]], label [[USX_ATOMIC_EXIT141:%.*]], label [[USX_ATOMIC_CONT142:%.*]] +// CHECK: usx.atomic.cont142: +// CHECK-NEXT: store i16 [[TMP1471]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT141]] +// CHECK: usx.atomic.exit141: +// CHECK-NEXT: [[TMP1473:%.*]] = extractvalue { i16, i1 } [[TMP1470]], 1 +// CHECK-NEXT: [[TMP1474:%.*]] = zext i1 [[TMP1473]] to i16 +// CHECK-NEXT: store i16 [[TMP1474]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1475:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1476:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1477:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1475]], i16 [[TMP1476]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1478:%.*]] = extractvalue { i16, i1 } [[TMP1477]], 0 +// CHECK-NEXT: [[TMP1479:%.*]] = extractvalue { i16, i1 } [[TMP1477]], 1 +// CHECK-NEXT: br i1 [[TMP1479]], label [[USX_ATOMIC_EXIT143:%.*]], label [[USX_ATOMIC_CONT144:%.*]] +// CHECK: usx.atomic.cont144: +// CHECK-NEXT: store i16 [[TMP1478]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT143]] +// CHECK: usx.atomic.exit143: +// CHECK-NEXT: [[TMP1480:%.*]] = extractvalue { i16, i1 } [[TMP1477]], 1 +// CHECK-NEXT: [[TMP1481:%.*]] = zext i1 [[TMP1480]] to i16 +// CHECK-NEXT: store i16 [[TMP1481]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1482:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1483:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1482]] acq_rel, align 2 +// CHECK-NEXT: store i16 [[TMP1483]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1484:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1485:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1484]] acq_rel, align 2 +// CHECK-NEXT: store i16 [[TMP1485]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1486:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1487:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1486]] acq_rel, align 2 +// CHECK-NEXT: store i16 [[TMP1487]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1488:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1489:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1488]] acq_rel, align 2 +// CHECK-NEXT: store i16 [[TMP1489]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1490:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1491:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1492:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1490]], i16 [[TMP1491]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1493:%.*]] = extractvalue { i16, i1 } [[TMP1492]], 0 +// CHECK-NEXT: store i16 [[TMP1493]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1494:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1495:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1496:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1494]], i16 [[TMP1495]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1497:%.*]] = extractvalue { i16, i1 
} [[TMP1496]], 0 +// CHECK-NEXT: store i16 [[TMP1497]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1498:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1499:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1498]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1500:%.*]] = icmp ugt i16 [[TMP1499]], [[TMP1498]] +// CHECK-NEXT: [[TMP1501:%.*]] = select i1 [[TMP1500]], i16 [[TMP1498]], i16 [[TMP1499]] +// CHECK-NEXT: store i16 [[TMP1501]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1502:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1503:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1502]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1504:%.*]] = icmp ult i16 [[TMP1503]], [[TMP1502]] +// CHECK-NEXT: [[TMP1505:%.*]] = select i1 [[TMP1504]], i16 [[TMP1502]], i16 [[TMP1503]] +// CHECK-NEXT: store i16 [[TMP1505]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1506:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1507:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1506]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1508:%.*]] = icmp ult i16 [[TMP1507]], [[TMP1506]] +// CHECK-NEXT: [[TMP1509:%.*]] = select i1 [[TMP1508]], i16 [[TMP1506]], i16 [[TMP1507]] +// CHECK-NEXT: store i16 [[TMP1509]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1510:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1511:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1510]] acq_rel, align 2 +// CHECK-NEXT: [[TMP1512:%.*]] = icmp ugt i16 [[TMP1511]], [[TMP1510]] +// CHECK-NEXT: [[TMP1513:%.*]] = select i1 [[TMP1512]], i16 [[TMP1510]], i16 [[TMP1511]] +// CHECK-NEXT: store i16 [[TMP1513]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1514:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1515:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1516:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1514]], i16 [[TMP1515]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1517:%.*]] = extractvalue { i16, i1 } [[TMP1516]], 0 +// CHECK-NEXT: [[TMP1518:%.*]] = extractvalue { i16, i1 } [[TMP1516]], 1 +// CHECK-NEXT: [[TMP1519:%.*]] = select i1 [[TMP1518]], i16 [[TMP1514]], i16 [[TMP1517]] +// CHECK-NEXT: store i16 [[TMP1519]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1520:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1521:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1522:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1520]], i16 [[TMP1521]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1523:%.*]] = extractvalue { i16, i1 } [[TMP1522]], 0 +// CHECK-NEXT: [[TMP1524:%.*]] = extractvalue { i16, i1 } [[TMP1522]], 1 +// CHECK-NEXT: [[TMP1525:%.*]] = select i1 [[TMP1524]], i16 [[TMP1520]], i16 [[TMP1523]] +// CHECK-NEXT: store i16 [[TMP1525]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1526:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1527:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1528:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1526]], i16 [[TMP1527]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1529:%.*]] = extractvalue { i16, i1 } [[TMP1528]], 0 +// CHECK-NEXT: [[TMP1530:%.*]] = 
extractvalue { i16, i1 } [[TMP1528]], 1 +// CHECK-NEXT: br i1 [[TMP1530]], label [[USX_ATOMIC_EXIT145:%.*]], label [[USX_ATOMIC_CONT146:%.*]] +// CHECK: usx.atomic.cont146: +// CHECK-NEXT: store i16 [[TMP1529]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT145]] +// CHECK: usx.atomic.exit145: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1531:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1532:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1533:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1531]], i16 [[TMP1532]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1534:%.*]] = extractvalue { i16, i1 } [[TMP1533]], 0 +// CHECK-NEXT: [[TMP1535:%.*]] = extractvalue { i16, i1 } [[TMP1533]], 1 +// CHECK-NEXT: br i1 [[TMP1535]], label [[USX_ATOMIC_EXIT147:%.*]], label [[USX_ATOMIC_CONT148:%.*]] +// CHECK: usx.atomic.cont148: +// CHECK-NEXT: store i16 [[TMP1534]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT147]] +// CHECK: usx.atomic.exit147: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1536:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1537:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1538:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1536]], i16 [[TMP1537]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1539:%.*]] = extractvalue { i16, i1 } [[TMP1538]], 1 +// CHECK-NEXT: [[TMP1540:%.*]] = zext i1 [[TMP1539]] to i16 +// CHECK-NEXT: store i16 [[TMP1540]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1541:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1542:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1543:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1541]], i16 [[TMP1542]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1544:%.*]] = extractvalue { i16, i1 } [[TMP1543]], 1 +// CHECK-NEXT: [[TMP1545:%.*]] = zext i1 [[TMP1544]] to i16 +// CHECK-NEXT: store i16 [[TMP1545]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1546:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1547:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1548:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1546]], i16 [[TMP1547]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1549:%.*]] = extractvalue { i16, i1 } [[TMP1548]], 0 +// CHECK-NEXT: [[TMP1550:%.*]] = extractvalue { i16, i1 } [[TMP1548]], 1 +// CHECK-NEXT: br i1 [[TMP1550]], label [[USX_ATOMIC_EXIT149:%.*]], label [[USX_ATOMIC_CONT150:%.*]] +// CHECK: usx.atomic.cont150: +// CHECK-NEXT: store i16 [[TMP1549]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT149]] +// CHECK: usx.atomic.exit149: +// CHECK-NEXT: [[TMP1551:%.*]] = extractvalue { i16, i1 } [[TMP1548]], 1 +// CHECK-NEXT: [[TMP1552:%.*]] = zext i1 [[TMP1551]] to i16 +// CHECK-NEXT: store i16 [[TMP1552]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1553:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1554:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1555:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1553]], i16 [[TMP1554]] acq_rel acquire, align 2 +// CHECK-NEXT: [[TMP1556:%.*]] = extractvalue { i16, i1 } [[TMP1555]], 0 +// CHECK-NEXT: [[TMP1557:%.*]] = extractvalue { i16, i1 } [[TMP1555]], 1 +// CHECK-NEXT: br i1 [[TMP1557]], label [[USX_ATOMIC_EXIT151:%.*]], label 
[[USX_ATOMIC_CONT152:%.*]] +// CHECK: usx.atomic.cont152: +// CHECK-NEXT: store i16 [[TMP1556]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT151]] +// CHECK: usx.atomic.exit151: +// CHECK-NEXT: [[TMP1558:%.*]] = extractvalue { i16, i1 } [[TMP1555]], 1 +// CHECK-NEXT: [[TMP1559:%.*]] = zext i1 [[TMP1558]] to i16 +// CHECK-NEXT: store i16 [[TMP1559]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1560:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1561:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1560]] acquire, align 2 +// CHECK-NEXT: store i16 [[TMP1561]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1562:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1563:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1562]] acquire, align 2 +// CHECK-NEXT: store i16 [[TMP1563]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1564:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1565:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1564]] acquire, align 2 +// CHECK-NEXT: store i16 [[TMP1565]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1566:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1567:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1566]] acquire, align 2 +// CHECK-NEXT: store i16 [[TMP1567]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1568:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1569:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1570:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1568]], i16 [[TMP1569]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1571:%.*]] = extractvalue { i16, i1 } [[TMP1570]], 0 +// CHECK-NEXT: store i16 [[TMP1571]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1572:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1573:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1574:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1572]], i16 [[TMP1573]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1575:%.*]] = extractvalue { i16, i1 } [[TMP1574]], 0 +// CHECK-NEXT: store i16 [[TMP1575]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1576:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1577:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1576]] acquire, align 2 +// CHECK-NEXT: [[TMP1578:%.*]] = icmp ugt i16 [[TMP1577]], [[TMP1576]] +// CHECK-NEXT: [[TMP1579:%.*]] = select i1 [[TMP1578]], i16 [[TMP1576]], i16 [[TMP1577]] +// CHECK-NEXT: store i16 [[TMP1579]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1580:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1581:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1580]] acquire, align 2 +// CHECK-NEXT: [[TMP1582:%.*]] = icmp ult i16 [[TMP1581]], [[TMP1580]] +// CHECK-NEXT: [[TMP1583:%.*]] = select i1 [[TMP1582]], i16 [[TMP1580]], i16 [[TMP1581]] +// CHECK-NEXT: store i16 [[TMP1583]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1584:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1585:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1584]] acquire, align 2 +// CHECK-NEXT: [[TMP1586:%.*]] = icmp ult i16 [[TMP1585]], [[TMP1584]] +// CHECK-NEXT: [[TMP1587:%.*]] = select i1 [[TMP1586]], i16 [[TMP1584]], i16 [[TMP1585]] +// CHECK-NEXT: store i16 [[TMP1587]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1588:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1589:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1588]] acquire, align 2 +// CHECK-NEXT: [[TMP1590:%.*]] = icmp ugt i16 [[TMP1589]], [[TMP1588]] +// CHECK-NEXT: [[TMP1591:%.*]] = select i1 
[[TMP1590]], i16 [[TMP1588]], i16 [[TMP1589]] +// CHECK-NEXT: store i16 [[TMP1591]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1592:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1593:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1594:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1592]], i16 [[TMP1593]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1595:%.*]] = extractvalue { i16, i1 } [[TMP1594]], 0 +// CHECK-NEXT: [[TMP1596:%.*]] = extractvalue { i16, i1 } [[TMP1594]], 1 +// CHECK-NEXT: [[TMP1597:%.*]] = select i1 [[TMP1596]], i16 [[TMP1592]], i16 [[TMP1595]] +// CHECK-NEXT: store i16 [[TMP1597]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1598:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1599:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1600:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1598]], i16 [[TMP1599]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1601:%.*]] = extractvalue { i16, i1 } [[TMP1600]], 0 +// CHECK-NEXT: [[TMP1602:%.*]] = extractvalue { i16, i1 } [[TMP1600]], 1 +// CHECK-NEXT: [[TMP1603:%.*]] = select i1 [[TMP1602]], i16 [[TMP1598]], i16 [[TMP1601]] +// CHECK-NEXT: store i16 [[TMP1603]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1604:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1605:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1606:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1604]], i16 [[TMP1605]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1607:%.*]] = extractvalue { i16, i1 } [[TMP1606]], 0 +// CHECK-NEXT: [[TMP1608:%.*]] = extractvalue { i16, i1 } [[TMP1606]], 1 +// CHECK-NEXT: br i1 [[TMP1608]], label [[USX_ATOMIC_EXIT153:%.*]], label [[USX_ATOMIC_CONT154:%.*]] +// CHECK: usx.atomic.cont154: +// CHECK-NEXT: store i16 [[TMP1607]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT153]] +// CHECK: usx.atomic.exit153: +// CHECK-NEXT: [[TMP1609:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1610:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1611:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1609]], i16 [[TMP1610]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1612:%.*]] = extractvalue { i16, i1 } [[TMP1611]], 0 +// CHECK-NEXT: [[TMP1613:%.*]] = extractvalue { i16, i1 } [[TMP1611]], 1 +// CHECK-NEXT: br i1 [[TMP1613]], label [[USX_ATOMIC_EXIT155:%.*]], label [[USX_ATOMIC_CONT156:%.*]] +// CHECK: usx.atomic.cont156: +// CHECK-NEXT: store i16 [[TMP1612]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT155]] +// CHECK: usx.atomic.exit155: +// CHECK-NEXT: [[TMP1614:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1615:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1616:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1614]], i16 [[TMP1615]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1617:%.*]] = extractvalue { i16, i1 } [[TMP1616]], 1 +// CHECK-NEXT: [[TMP1618:%.*]] = zext i1 [[TMP1617]] to i16 +// CHECK-NEXT: store i16 [[TMP1618]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1619:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1620:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1621:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1619]], i16 [[TMP1620]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1622:%.*]] = extractvalue { i16, i1 } [[TMP1621]], 1 +// CHECK-NEXT: [[TMP1623:%.*]] = zext i1 [[TMP1622]] to i16 +// CHECK-NEXT: store i16 [[TMP1623]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1624:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1625:%.*]] = load i16, i16* [[USD]], align 2 +// 
CHECK-NEXT: [[TMP1626:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1624]], i16 [[TMP1625]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1627:%.*]] = extractvalue { i16, i1 } [[TMP1626]], 0 +// CHECK-NEXT: [[TMP1628:%.*]] = extractvalue { i16, i1 } [[TMP1626]], 1 +// CHECK-NEXT: br i1 [[TMP1628]], label [[USX_ATOMIC_EXIT157:%.*]], label [[USX_ATOMIC_CONT158:%.*]] +// CHECK: usx.atomic.cont158: +// CHECK-NEXT: store i16 [[TMP1627]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT157]] +// CHECK: usx.atomic.exit157: +// CHECK-NEXT: [[TMP1629:%.*]] = extractvalue { i16, i1 } [[TMP1626]], 1 +// CHECK-NEXT: [[TMP1630:%.*]] = zext i1 [[TMP1629]] to i16 +// CHECK-NEXT: store i16 [[TMP1630]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1631:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1632:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1633:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1631]], i16 [[TMP1632]] acquire acquire, align 2 +// CHECK-NEXT: [[TMP1634:%.*]] = extractvalue { i16, i1 } [[TMP1633]], 0 +// CHECK-NEXT: [[TMP1635:%.*]] = extractvalue { i16, i1 } [[TMP1633]], 1 +// CHECK-NEXT: br i1 [[TMP1635]], label [[USX_ATOMIC_EXIT159:%.*]], label [[USX_ATOMIC_CONT160:%.*]] +// CHECK: usx.atomic.cont160: +// CHECK-NEXT: store i16 [[TMP1634]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT159]] +// CHECK: usx.atomic.exit159: +// CHECK-NEXT: [[TMP1636:%.*]] = extractvalue { i16, i1 } [[TMP1633]], 1 +// CHECK-NEXT: [[TMP1637:%.*]] = zext i1 [[TMP1636]] to i16 +// CHECK-NEXT: store i16 [[TMP1637]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1638:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1639:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1638]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1639]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1640:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1641:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1640]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1641]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1642:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1643:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1642]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1643]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1644:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1645:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1644]] monotonic, align 2 +// CHECK-NEXT: store i16 [[TMP1645]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1646:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1647:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1648:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1646]], i16 [[TMP1647]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1649:%.*]] = extractvalue { i16, i1 } [[TMP1648]], 0 +// CHECK-NEXT: store i16 [[TMP1649]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1650:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1651:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1652:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1650]], i16 [[TMP1651]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1653:%.*]] = extractvalue { i16, i1 } [[TMP1652]], 0 +// CHECK-NEXT: store i16 [[TMP1653]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1654:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1655:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1654]] monotonic, align 2 +// CHECK-NEXT: [[TMP1656:%.*]] = icmp ugt i16 [[TMP1655]], [[TMP1654]] +// CHECK-NEXT: [[TMP1657:%.*]] = select i1 [[TMP1656]], 
i16 [[TMP1654]], i16 [[TMP1655]] +// CHECK-NEXT: store i16 [[TMP1657]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1658:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1659:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1658]] monotonic, align 2 +// CHECK-NEXT: [[TMP1660:%.*]] = icmp ult i16 [[TMP1659]], [[TMP1658]] +// CHECK-NEXT: [[TMP1661:%.*]] = select i1 [[TMP1660]], i16 [[TMP1658]], i16 [[TMP1659]] +// CHECK-NEXT: store i16 [[TMP1661]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1662:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1663:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1662]] monotonic, align 2 +// CHECK-NEXT: [[TMP1664:%.*]] = icmp ult i16 [[TMP1663]], [[TMP1662]] +// CHECK-NEXT: [[TMP1665:%.*]] = select i1 [[TMP1664]], i16 [[TMP1662]], i16 [[TMP1663]] +// CHECK-NEXT: store i16 [[TMP1665]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1666:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1667:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1666]] monotonic, align 2 +// CHECK-NEXT: [[TMP1668:%.*]] = icmp ugt i16 [[TMP1667]], [[TMP1666]] +// CHECK-NEXT: [[TMP1669:%.*]] = select i1 [[TMP1668]], i16 [[TMP1666]], i16 [[TMP1667]] +// CHECK-NEXT: store i16 [[TMP1669]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1670:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1671:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1672:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1670]], i16 [[TMP1671]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1673:%.*]] = extractvalue { i16, i1 } [[TMP1672]], 0 +// CHECK-NEXT: [[TMP1674:%.*]] = extractvalue { i16, i1 } [[TMP1672]], 1 +// CHECK-NEXT: [[TMP1675:%.*]] = select i1 [[TMP1674]], i16 [[TMP1670]], i16 [[TMP1673]] +// CHECK-NEXT: store i16 [[TMP1675]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1676:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1677:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1678:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1676]], i16 [[TMP1677]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1679:%.*]] = extractvalue { i16, i1 } [[TMP1678]], 0 +// CHECK-NEXT: [[TMP1680:%.*]] = extractvalue { i16, i1 } [[TMP1678]], 1 +// CHECK-NEXT: [[TMP1681:%.*]] = select i1 [[TMP1680]], i16 [[TMP1676]], i16 [[TMP1679]] +// CHECK-NEXT: store i16 [[TMP1681]], i16* [[USV]], align 2 +// CHECK-NEXT: [[TMP1682:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1683:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1684:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1682]], i16 [[TMP1683]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1685:%.*]] = extractvalue { i16, i1 } [[TMP1684]], 0 +// CHECK-NEXT: [[TMP1686:%.*]] = extractvalue { i16, i1 } [[TMP1684]], 1 +// CHECK-NEXT: br i1 [[TMP1686]], label [[USX_ATOMIC_EXIT161:%.*]], label [[USX_ATOMIC_CONT162:%.*]] +// CHECK: usx.atomic.cont162: +// CHECK-NEXT: store i16 [[TMP1685]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT161]] +// CHECK: usx.atomic.exit161: +// CHECK-NEXT: [[TMP1687:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1688:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1689:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1687]], i16 [[TMP1688]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1690:%.*]] = extractvalue { i16, i1 } [[TMP1689]], 0 +// CHECK-NEXT: [[TMP1691:%.*]] = extractvalue { i16, i1 } [[TMP1689]], 1 +// CHECK-NEXT: br i1 [[TMP1691]], label [[USX_ATOMIC_EXIT163:%.*]], label [[USX_ATOMIC_CONT164:%.*]] +// CHECK: usx.atomic.cont164: 
+// CHECK-NEXT: store i16 [[TMP1690]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT163]] +// CHECK: usx.atomic.exit163: +// CHECK-NEXT: [[TMP1692:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1693:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1694:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1692]], i16 [[TMP1693]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1695:%.*]] = extractvalue { i16, i1 } [[TMP1694]], 1 +// CHECK-NEXT: [[TMP1696:%.*]] = zext i1 [[TMP1695]] to i16 +// CHECK-NEXT: store i16 [[TMP1696]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1697:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1698:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1699:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1697]], i16 [[TMP1698]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1700:%.*]] = extractvalue { i16, i1 } [[TMP1699]], 1 +// CHECK-NEXT: [[TMP1701:%.*]] = zext i1 [[TMP1700]] to i16 +// CHECK-NEXT: store i16 [[TMP1701]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1702:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1703:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1704:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1702]], i16 [[TMP1703]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1705:%.*]] = extractvalue { i16, i1 } [[TMP1704]], 0 +// CHECK-NEXT: [[TMP1706:%.*]] = extractvalue { i16, i1 } [[TMP1704]], 1 +// CHECK-NEXT: br i1 [[TMP1706]], label [[USX_ATOMIC_EXIT165:%.*]], label [[USX_ATOMIC_CONT166:%.*]] +// CHECK: usx.atomic.cont166: +// CHECK-NEXT: store i16 [[TMP1705]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT165]] +// CHECK: usx.atomic.exit165: +// CHECK-NEXT: [[TMP1707:%.*]] = extractvalue { i16, i1 } [[TMP1704]], 1 +// CHECK-NEXT: [[TMP1708:%.*]] = zext i1 [[TMP1707]] to i16 +// CHECK-NEXT: store i16 [[TMP1708]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1709:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1710:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1711:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1709]], i16 [[TMP1710]] monotonic monotonic, align 2 +// CHECK-NEXT: [[TMP1712:%.*]] = extractvalue { i16, i1 } [[TMP1711]], 0 +// CHECK-NEXT: [[TMP1713:%.*]] = extractvalue { i16, i1 } [[TMP1711]], 1 +// CHECK-NEXT: br i1 [[TMP1713]], label [[USX_ATOMIC_EXIT167:%.*]], label [[USX_ATOMIC_CONT168:%.*]] +// CHECK: usx.atomic.cont168: +// CHECK-NEXT: store i16 [[TMP1712]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT167]] +// CHECK: usx.atomic.exit167: +// CHECK-NEXT: [[TMP1714:%.*]] = extractvalue { i16, i1 } [[TMP1711]], 1 +// CHECK-NEXT: [[TMP1715:%.*]] = zext i1 [[TMP1714]] to i16 +// CHECK-NEXT: store i16 [[TMP1715]], i16* [[USR]], align 2 +// CHECK-NEXT: [[TMP1716:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1717:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1716]] release, align 2 +// CHECK-NEXT: store i16 [[TMP1717]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1718:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1719:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1718]] release, align 2 +// CHECK-NEXT: store i16 [[TMP1719]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1720:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1721:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1720]] release, align 2 +// CHECK-NEXT: store i16 [[TMP1721]], 
i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1722:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1723:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1722]] release, align 2 +// CHECK-NEXT: store i16 [[TMP1723]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1724:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1725:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1726:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1724]], i16 [[TMP1725]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1727:%.*]] = extractvalue { i16, i1 } [[TMP1726]], 0 +// CHECK-NEXT: store i16 [[TMP1727]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1728:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1729:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1730:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1728]], i16 [[TMP1729]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1731:%.*]] = extractvalue { i16, i1 } [[TMP1730]], 0 +// CHECK-NEXT: store i16 [[TMP1731]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1732:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1733:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1732]] release, align 2 +// CHECK-NEXT: [[TMP1734:%.*]] = icmp ugt i16 [[TMP1733]], [[TMP1732]] +// CHECK-NEXT: [[TMP1735:%.*]] = select i1 [[TMP1734]], i16 [[TMP1732]], i16 [[TMP1733]] +// CHECK-NEXT: store i16 [[TMP1735]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1736:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1737:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1736]] release, align 2 +// CHECK-NEXT: [[TMP1738:%.*]] = icmp ult i16 [[TMP1737]], [[TMP1736]] +// CHECK-NEXT: [[TMP1739:%.*]] = select i1 [[TMP1738]], i16 [[TMP1736]], i16 [[TMP1737]] +// CHECK-NEXT: store i16 [[TMP1739]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1740:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1741:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1740]] release, align 2 +// CHECK-NEXT: [[TMP1742:%.*]] = icmp ult i16 [[TMP1741]], [[TMP1740]] +// CHECK-NEXT: [[TMP1743:%.*]] = select i1 [[TMP1742]], i16 [[TMP1740]], i16 [[TMP1741]] +// CHECK-NEXT: store i16 [[TMP1743]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1744:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1745:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1744]] release, align 2 +// CHECK-NEXT: [[TMP1746:%.*]] = icmp ugt i16 [[TMP1745]], [[TMP1744]] +// CHECK-NEXT: [[TMP1747:%.*]] = select i1 [[TMP1746]], i16 [[TMP1744]], i16 [[TMP1745]] +// CHECK-NEXT: store i16 [[TMP1747]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1748:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1749:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1750:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1748]], i16 [[TMP1749]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1751:%.*]] = extractvalue { i16, i1 } [[TMP1750]], 0 +// CHECK-NEXT: [[TMP1752:%.*]] = extractvalue { i16, i1 } [[TMP1750]], 1 +// CHECK-NEXT: [[TMP1753:%.*]] = select i1 
[[TMP1752]], i16 [[TMP1748]], i16 [[TMP1751]] +// CHECK-NEXT: store i16 [[TMP1753]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1754:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1755:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1756:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1754]], i16 [[TMP1755]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1757:%.*]] = extractvalue { i16, i1 } [[TMP1756]], 0 +// CHECK-NEXT: [[TMP1758:%.*]] = extractvalue { i16, i1 } [[TMP1756]], 1 +// CHECK-NEXT: [[TMP1759:%.*]] = select i1 [[TMP1758]], i16 [[TMP1754]], i16 [[TMP1757]] +// CHECK-NEXT: store i16 [[TMP1759]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1760:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1761:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1762:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1760]], i16 [[TMP1761]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1763:%.*]] = extractvalue { i16, i1 } [[TMP1762]], 0 +// CHECK-NEXT: [[TMP1764:%.*]] = extractvalue { i16, i1 } [[TMP1762]], 1 +// CHECK-NEXT: br i1 [[TMP1764]], label [[USX_ATOMIC_EXIT169:%.*]], label [[USX_ATOMIC_CONT170:%.*]] +// CHECK: usx.atomic.cont170: +// CHECK-NEXT: store i16 [[TMP1763]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT169]] +// CHECK: usx.atomic.exit169: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1765:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1766:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1767:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1765]], i16 [[TMP1766]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1768:%.*]] = extractvalue { i16, i1 } [[TMP1767]], 0 +// CHECK-NEXT: [[TMP1769:%.*]] = extractvalue { i16, i1 } [[TMP1767]], 1 +// CHECK-NEXT: br i1 [[TMP1769]], label [[USX_ATOMIC_EXIT171:%.*]], label [[USX_ATOMIC_CONT172:%.*]] +// CHECK: usx.atomic.cont172: +// CHECK-NEXT: store i16 [[TMP1768]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT171]] +// CHECK: usx.atomic.exit171: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1770:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1771:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1772:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1770]], i16 [[TMP1771]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1773:%.*]] = extractvalue { i16, i1 } [[TMP1772]], 1 +// CHECK-NEXT: [[TMP1774:%.*]] = zext i1 [[TMP1773]] to i16 +// CHECK-NEXT: store i16 [[TMP1774]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1775:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1776:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1777:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1775]], i16 [[TMP1776]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1778:%.*]] = extractvalue { i16, i1 } [[TMP1777]], 1 +// CHECK-NEXT: [[TMP1779:%.*]] = zext i1 [[TMP1778]] to i16 +// CHECK-NEXT: store i16 [[TMP1779]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1780:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1781:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1782:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1780]], i16 [[TMP1781]] release monotonic, align 2 +// 
CHECK-NEXT: [[TMP1783:%.*]] = extractvalue { i16, i1 } [[TMP1782]], 0 +// CHECK-NEXT: [[TMP1784:%.*]] = extractvalue { i16, i1 } [[TMP1782]], 1 +// CHECK-NEXT: br i1 [[TMP1784]], label [[USX_ATOMIC_EXIT173:%.*]], label [[USX_ATOMIC_CONT174:%.*]] +// CHECK: usx.atomic.cont174: +// CHECK-NEXT: store i16 [[TMP1783]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT173]] +// CHECK: usx.atomic.exit173: +// CHECK-NEXT: [[TMP1785:%.*]] = extractvalue { i16, i1 } [[TMP1782]], 1 +// CHECK-NEXT: [[TMP1786:%.*]] = zext i1 [[TMP1785]] to i16 +// CHECK-NEXT: store i16 [[TMP1786]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1787:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1788:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1789:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1787]], i16 [[TMP1788]] release monotonic, align 2 +// CHECK-NEXT: [[TMP1790:%.*]] = extractvalue { i16, i1 } [[TMP1789]], 0 +// CHECK-NEXT: [[TMP1791:%.*]] = extractvalue { i16, i1 } [[TMP1789]], 1 +// CHECK-NEXT: br i1 [[TMP1791]], label [[USX_ATOMIC_EXIT175:%.*]], label [[USX_ATOMIC_CONT176:%.*]] +// CHECK: usx.atomic.cont176: +// CHECK-NEXT: store i16 [[TMP1790]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT175]] +// CHECK: usx.atomic.exit175: +// CHECK-NEXT: [[TMP1792:%.*]] = extractvalue { i16, i1 } [[TMP1789]], 1 +// CHECK-NEXT: [[TMP1793:%.*]] = zext i1 [[TMP1792]] to i16 +// CHECK-NEXT: store i16 [[TMP1793]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1794:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1795:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1794]] seq_cst, align 2 +// CHECK-NEXT: store i16 [[TMP1795]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1796:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1797:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1796]] seq_cst, align 2 +// CHECK-NEXT: store i16 [[TMP1797]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1798:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1799:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1798]] seq_cst, align 2 +// CHECK-NEXT: store i16 [[TMP1799]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1800:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1801:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1800]] seq_cst, align 2 +// CHECK-NEXT: store i16 [[TMP1801]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1802:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1803:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1804:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1802]], i16 [[TMP1803]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1805:%.*]] = extractvalue { i16, i1 } [[TMP1804]], 0 +// CHECK-NEXT: store i16 [[TMP1805]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1806:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1807:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1808:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1806]], i16 [[TMP1807]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1809:%.*]] = extractvalue { i16, i1 } [[TMP1808]], 0 
+// CHECK-NEXT: store i16 [[TMP1809]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1810:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1811:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1810]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1812:%.*]] = icmp ugt i16 [[TMP1811]], [[TMP1810]] +// CHECK-NEXT: [[TMP1813:%.*]] = select i1 [[TMP1812]], i16 [[TMP1810]], i16 [[TMP1811]] +// CHECK-NEXT: store i16 [[TMP1813]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1814:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1815:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1814]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1816:%.*]] = icmp ult i16 [[TMP1815]], [[TMP1814]] +// CHECK-NEXT: [[TMP1817:%.*]] = select i1 [[TMP1816]], i16 [[TMP1814]], i16 [[TMP1815]] +// CHECK-NEXT: store i16 [[TMP1817]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1818:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1819:%.*]] = atomicrmw umin i16* [[USX]], i16 [[TMP1818]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1820:%.*]] = icmp ult i16 [[TMP1819]], [[TMP1818]] +// CHECK-NEXT: [[TMP1821:%.*]] = select i1 [[TMP1820]], i16 [[TMP1818]], i16 [[TMP1819]] +// CHECK-NEXT: store i16 [[TMP1821]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1822:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1823:%.*]] = atomicrmw umax i16* [[USX]], i16 [[TMP1822]] seq_cst, align 2 +// CHECK-NEXT: [[TMP1824:%.*]] = icmp ugt i16 [[TMP1823]], [[TMP1822]] +// CHECK-NEXT: [[TMP1825:%.*]] = select i1 [[TMP1824]], i16 [[TMP1822]], i16 [[TMP1823]] +// CHECK-NEXT: store i16 [[TMP1825]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1826:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1827:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1828:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1826]], i16 [[TMP1827]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1829:%.*]] = extractvalue { i16, i1 } [[TMP1828]], 0 +// CHECK-NEXT: [[TMP1830:%.*]] = extractvalue { i16, i1 } [[TMP1828]], 1 +// CHECK-NEXT: [[TMP1831:%.*]] = select i1 [[TMP1830]], i16 [[TMP1826]], i16 [[TMP1829]] +// CHECK-NEXT: store i16 [[TMP1831]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1832:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1833:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1834:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1832]], i16 [[TMP1833]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1835:%.*]] = extractvalue { i16, i1 } [[TMP1834]], 0 +// CHECK-NEXT: [[TMP1836:%.*]] = extractvalue { i16, i1 } [[TMP1834]], 1 +// CHECK-NEXT: [[TMP1837:%.*]] = select i1 [[TMP1836]], i16 [[TMP1832]], i16 [[TMP1835]] +// CHECK-NEXT: store i16 [[TMP1837]], i16* [[USV]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1838:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1839:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1840:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1838]], i16 [[TMP1839]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1841:%.*]] = extractvalue { i16, i1 } [[TMP1840]], 0 +// CHECK-NEXT: [[TMP1842:%.*]] = extractvalue { i16, i1 } 
[[TMP1840]], 1 +// CHECK-NEXT: br i1 [[TMP1842]], label [[USX_ATOMIC_EXIT177:%.*]], label [[USX_ATOMIC_CONT178:%.*]] +// CHECK: usx.atomic.cont178: +// CHECK-NEXT: store i16 [[TMP1841]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT177]] +// CHECK: usx.atomic.exit177: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1843:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1844:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1845:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1843]], i16 [[TMP1844]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1846:%.*]] = extractvalue { i16, i1 } [[TMP1845]], 0 +// CHECK-NEXT: [[TMP1847:%.*]] = extractvalue { i16, i1 } [[TMP1845]], 1 +// CHECK-NEXT: br i1 [[TMP1847]], label [[USX_ATOMIC_EXIT179:%.*]], label [[USX_ATOMIC_CONT180:%.*]] +// CHECK: usx.atomic.cont180: +// CHECK-NEXT: store i16 [[TMP1846]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT179]] +// CHECK: usx.atomic.exit179: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1848:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1849:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1850:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1848]], i16 [[TMP1849]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1851:%.*]] = extractvalue { i16, i1 } [[TMP1850]], 1 +// CHECK-NEXT: [[TMP1852:%.*]] = zext i1 [[TMP1851]] to i16 +// CHECK-NEXT: store i16 [[TMP1852]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1853:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1854:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1855:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1853]], i16 [[TMP1854]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1856:%.*]] = extractvalue { i16, i1 } [[TMP1855]], 1 +// CHECK-NEXT: [[TMP1857:%.*]] = zext i1 [[TMP1856]] to i16 +// CHECK-NEXT: store i16 [[TMP1857]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1858:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1859:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1860:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1858]], i16 [[TMP1859]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1861:%.*]] = extractvalue { i16, i1 } [[TMP1860]], 0 +// CHECK-NEXT: [[TMP1862:%.*]] = extractvalue { i16, i1 } [[TMP1860]], 1 +// CHECK-NEXT: br i1 [[TMP1862]], label [[USX_ATOMIC_EXIT181:%.*]], label [[USX_ATOMIC_CONT182:%.*]] +// CHECK: usx.atomic.cont182: +// CHECK-NEXT: store i16 [[TMP1861]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT181]] +// CHECK: usx.atomic.exit181: +// CHECK-NEXT: [[TMP1863:%.*]] = extractvalue { i16, i1 } [[TMP1860]], 1 +// CHECK-NEXT: [[TMP1864:%.*]] = zext i1 [[TMP1863]] to i16 +// CHECK-NEXT: store i16 [[TMP1864]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1865:%.*]] = load i16, i16* [[USE]], align 2 +// CHECK-NEXT: [[TMP1866:%.*]] = load i16, i16* [[USD]], align 2 +// CHECK-NEXT: [[TMP1867:%.*]] = cmpxchg i16* [[USX]], i16 [[TMP1865]], i16 [[TMP1866]] seq_cst seq_cst, align 2 +// CHECK-NEXT: [[TMP1868:%.*]] = extractvalue { i16, i1 } [[TMP1867]], 0 +// CHECK-NEXT: [[TMP1869:%.*]] = extractvalue { i16, i1 } [[TMP1867]], 1 +// CHECK-NEXT: br i1 [[TMP1869]], label [[USX_ATOMIC_EXIT183:%.*]], label [[USX_ATOMIC_CONT184:%.*]] +// CHECK: 
usx.atomic.cont184: +// CHECK-NEXT: store i16 [[TMP1868]], i16* [[USV]], align 2 +// CHECK-NEXT: br label [[USX_ATOMIC_EXIT183]] +// CHECK: usx.atomic.exit183: +// CHECK-NEXT: [[TMP1870:%.*]] = extractvalue { i16, i1 } [[TMP1867]], 1 +// CHECK-NEXT: [[TMP1871:%.*]] = zext i1 [[TMP1870]] to i16 +// CHECK-NEXT: store i16 [[TMP1871]], i16* [[USR]], align 2 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1872:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1873:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1872]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1873]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1874:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1875:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1874]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1875]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1876:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1877:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1876]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1877]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1878:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1879:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1878]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1879]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1880:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1881:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1882:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1880]], i32 [[TMP1881]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1883:%.*]] = extractvalue { i32, i1 } [[TMP1882]], 0 +// CHECK-NEXT: store volatile i32 [[TMP1883]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1884:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1885:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1886:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1884]], i32 [[TMP1885]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1887:%.*]] = extractvalue { i32, i1 } [[TMP1886]], 0 +// CHECK-NEXT: store volatile i32 [[TMP1887]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1888:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1889:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1888]] monotonic, align 4 +// CHECK-NEXT: [[TMP1890:%.*]] = icmp ugt i32 [[TMP1889]], [[TMP1888]] +// CHECK-NEXT: [[TMP1891:%.*]] = select i1 [[TMP1890]], i32 [[TMP1888]], i32 [[TMP1889]] +// CHECK-NEXT: store volatile i32 [[TMP1891]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1892:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1893:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1892]] monotonic, align 4 +// CHECK-NEXT: [[TMP1894:%.*]] = icmp ult i32 [[TMP1893]], [[TMP1892]] +// CHECK-NEXT: [[TMP1895:%.*]] = select i1 [[TMP1894]], i32 [[TMP1892]], i32 [[TMP1893]] +// CHECK-NEXT: store volatile i32 [[TMP1895]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1896:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1897:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1896]] monotonic, align 4 +// CHECK-NEXT: [[TMP1898:%.*]] = icmp ult i32 [[TMP1897]], [[TMP1896]] +// CHECK-NEXT: [[TMP1899:%.*]] = select i1 [[TMP1898]], i32 [[TMP1896]], i32 [[TMP1897]] +// CHECK-NEXT: store volatile i32 [[TMP1899]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1900:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1901:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1900]] monotonic, align 4 +// CHECK-NEXT: [[TMP1902:%.*]] = icmp ugt i32 [[TMP1901]], [[TMP1900]] +// 
CHECK-NEXT: [[TMP1903:%.*]] = select i1 [[TMP1902]], i32 [[TMP1900]], i32 [[TMP1901]] +// CHECK-NEXT: store volatile i32 [[TMP1903]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1904:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1905:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1906:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1904]], i32 [[TMP1905]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1907:%.*]] = extractvalue { i32, i1 } [[TMP1906]], 0 +// CHECK-NEXT: [[TMP1908:%.*]] = extractvalue { i32, i1 } [[TMP1906]], 1 +// CHECK-NEXT: [[TMP1909:%.*]] = select i1 [[TMP1908]], i32 [[TMP1904]], i32 [[TMP1907]] +// CHECK-NEXT: store volatile i32 [[TMP1909]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1910:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1911:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1912:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1910]], i32 [[TMP1911]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1913:%.*]] = extractvalue { i32, i1 } [[TMP1912]], 0 +// CHECK-NEXT: [[TMP1914:%.*]] = extractvalue { i32, i1 } [[TMP1912]], 1 +// CHECK-NEXT: [[TMP1915:%.*]] = select i1 [[TMP1914]], i32 [[TMP1910]], i32 [[TMP1913]] +// CHECK-NEXT: store volatile i32 [[TMP1915]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP1916:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1917:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1918:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1916]], i32 [[TMP1917]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1919:%.*]] = extractvalue { i32, i1 } [[TMP1918]], 0 +// CHECK-NEXT: [[TMP1920:%.*]] = extractvalue { i32, i1 } [[TMP1918]], 1 +// CHECK-NEXT: br i1 [[TMP1920]], label [[IX_ATOMIC_EXIT:%.*]], label [[IX_ATOMIC_CONT:%.*]] +// CHECK: ix.atomic.cont: +// CHECK-NEXT: store i32 [[TMP1919]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT]] +// CHECK: ix.atomic.exit: +// CHECK-NEXT: [[TMP1921:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1922:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1923:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1921]], i32 [[TMP1922]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1924:%.*]] = extractvalue { i32, i1 } [[TMP1923]], 0 +// CHECK-NEXT: [[TMP1925:%.*]] = extractvalue { i32, i1 } [[TMP1923]], 1 +// CHECK-NEXT: br i1 [[TMP1925]], label [[IX_ATOMIC_EXIT185:%.*]], label [[IX_ATOMIC_CONT186:%.*]] +// CHECK: ix.atomic.cont186: +// CHECK-NEXT: store i32 [[TMP1924]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT185]] +// CHECK: ix.atomic.exit185: +// CHECK-NEXT: [[TMP1926:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1927:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1928:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1926]], i32 [[TMP1927]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1929:%.*]] = extractvalue { i32, i1 } [[TMP1928]], 1 +// CHECK-NEXT: [[TMP1930:%.*]] = zext i1 [[TMP1929]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP1930]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP1931:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1932:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1933:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1931]], i32 [[TMP1932]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1934:%.*]] = extractvalue { i32, i1 } [[TMP1933]], 1 +// CHECK-NEXT: [[TMP1935:%.*]] = zext i1 [[TMP1934]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP1935]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP1936:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: 
[[TMP1937:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1938:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1936]], i32 [[TMP1937]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1939:%.*]] = extractvalue { i32, i1 } [[TMP1938]], 0 +// CHECK-NEXT: [[TMP1940:%.*]] = extractvalue { i32, i1 } [[TMP1938]], 1 +// CHECK-NEXT: br i1 [[TMP1940]], label [[IX_ATOMIC_EXIT187:%.*]], label [[IX_ATOMIC_CONT188:%.*]] +// CHECK: ix.atomic.cont188: +// CHECK-NEXT: store i32 [[TMP1939]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT187]] +// CHECK: ix.atomic.exit187: +// CHECK-NEXT: [[TMP1941:%.*]] = extractvalue { i32, i1 } [[TMP1938]], 1 +// CHECK-NEXT: [[TMP1942:%.*]] = zext i1 [[TMP1941]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP1942]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP1943:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1944:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1945:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1943]], i32 [[TMP1944]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP1946:%.*]] = extractvalue { i32, i1 } [[TMP1945]], 0 +// CHECK-NEXT: [[TMP1947:%.*]] = extractvalue { i32, i1 } [[TMP1945]], 1 +// CHECK-NEXT: br i1 [[TMP1947]], label [[IX_ATOMIC_EXIT189:%.*]], label [[IX_ATOMIC_CONT190:%.*]] +// CHECK: ix.atomic.cont190: +// CHECK-NEXT: store i32 [[TMP1946]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT189]] +// CHECK: ix.atomic.exit189: +// CHECK-NEXT: [[TMP1948:%.*]] = extractvalue { i32, i1 } [[TMP1945]], 1 +// CHECK-NEXT: [[TMP1949:%.*]] = zext i1 [[TMP1948]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP1949]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP1950:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1951:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1950]] acq_rel, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1951]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1952:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1953:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1952]] acq_rel, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1953]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1954:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1955:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1954]] acq_rel, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1955]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1956:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1957:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1956]] acq_rel, align 4 +// CHECK-NEXT: store volatile i32 [[TMP1957]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1958:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1959:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1960:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1958]], i32 [[TMP1959]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP1961:%.*]] = extractvalue { i32, i1 } [[TMP1960]], 0 +// CHECK-NEXT: store volatile i32 [[TMP1961]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1962:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1963:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1964:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1962]], i32 [[TMP1963]] acq_rel acquire, align 4 +// 
CHECK-NEXT: [[TMP1965:%.*]] = extractvalue { i32, i1 } [[TMP1964]], 0 +// CHECK-NEXT: store volatile i32 [[TMP1965]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1966:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1967:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1966]] acq_rel, align 4 +// CHECK-NEXT: [[TMP1968:%.*]] = icmp ugt i32 [[TMP1967]], [[TMP1966]] +// CHECK-NEXT: [[TMP1969:%.*]] = select i1 [[TMP1968]], i32 [[TMP1966]], i32 [[TMP1967]] +// CHECK-NEXT: store volatile i32 [[TMP1969]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1970:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1971:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1970]] acq_rel, align 4 +// CHECK-NEXT: [[TMP1972:%.*]] = icmp ult i32 [[TMP1971]], [[TMP1970]] +// CHECK-NEXT: [[TMP1973:%.*]] = select i1 [[TMP1972]], i32 [[TMP1970]], i32 [[TMP1971]] +// CHECK-NEXT: store volatile i32 [[TMP1973]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1974:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1975:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP1974]] acq_rel, align 4 +// CHECK-NEXT: [[TMP1976:%.*]] = icmp ult i32 [[TMP1975]], [[TMP1974]] +// CHECK-NEXT: [[TMP1977:%.*]] = select i1 [[TMP1976]], i32 [[TMP1974]], i32 [[TMP1975]] +// CHECK-NEXT: store volatile i32 [[TMP1977]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1978:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1979:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP1978]] acq_rel, align 4 +// CHECK-NEXT: [[TMP1980:%.*]] = icmp ugt i32 [[TMP1979]], [[TMP1978]] +// CHECK-NEXT: [[TMP1981:%.*]] = select i1 [[TMP1980]], i32 [[TMP1978]], i32 [[TMP1979]] +// CHECK-NEXT: store volatile i32 [[TMP1981]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1982:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1983:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1984:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1982]], i32 [[TMP1983]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP1985:%.*]] = extractvalue { i32, i1 } [[TMP1984]], 0 +// CHECK-NEXT: [[TMP1986:%.*]] = extractvalue { i32, i1 } [[TMP1984]], 1 +// CHECK-NEXT: [[TMP1987:%.*]] = select i1 [[TMP1986]], i32 [[TMP1982]], i32 [[TMP1985]] +// CHECK-NEXT: store volatile i32 [[TMP1987]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1988:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1989:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1990:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1988]], i32 [[TMP1989]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP1991:%.*]] = extractvalue { i32, i1 } [[TMP1990]], 0 +// CHECK-NEXT: [[TMP1992:%.*]] = extractvalue { i32, i1 } [[TMP1990]], 1 +// CHECK-NEXT: [[TMP1993:%.*]] = select i1 [[TMP1992]], i32 [[TMP1988]], i32 [[TMP1991]] +// CHECK-NEXT: store volatile i32 [[TMP1993]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1994:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP1995:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP1996:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1994]], i32 [[TMP1995]] acq_rel acquire, align 4 +// CHECK-NEXT: 
[[TMP1997:%.*]] = extractvalue { i32, i1 } [[TMP1996]], 0 +// CHECK-NEXT: [[TMP1998:%.*]] = extractvalue { i32, i1 } [[TMP1996]], 1 +// CHECK-NEXT: br i1 [[TMP1998]], label [[IX_ATOMIC_EXIT191:%.*]], label [[IX_ATOMIC_CONT192:%.*]] +// CHECK: ix.atomic.cont192: +// CHECK-NEXT: store i32 [[TMP1997]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT191]] +// CHECK: ix.atomic.exit191: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP1999:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2000:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2001:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP1999]], i32 [[TMP2000]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2002:%.*]] = extractvalue { i32, i1 } [[TMP2001]], 0 +// CHECK-NEXT: [[TMP2003:%.*]] = extractvalue { i32, i1 } [[TMP2001]], 1 +// CHECK-NEXT: br i1 [[TMP2003]], label [[IX_ATOMIC_EXIT193:%.*]], label [[IX_ATOMIC_CONT194:%.*]] +// CHECK: ix.atomic.cont194: +// CHECK-NEXT: store i32 [[TMP2002]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT193]] +// CHECK: ix.atomic.exit193: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2004:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2005:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2006:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2004]], i32 [[TMP2005]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2007:%.*]] = extractvalue { i32, i1 } [[TMP2006]], 1 +// CHECK-NEXT: [[TMP2008:%.*]] = zext i1 [[TMP2007]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2008]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2009:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2010:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2011:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2009]], i32 [[TMP2010]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2012:%.*]] = extractvalue { i32, i1 } [[TMP2011]], 1 +// CHECK-NEXT: [[TMP2013:%.*]] = zext i1 [[TMP2012]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2013]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2014:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2015:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2016:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2014]], i32 [[TMP2015]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2017:%.*]] = extractvalue { i32, i1 } [[TMP2016]], 0 +// CHECK-NEXT: [[TMP2018:%.*]] = extractvalue { i32, i1 } [[TMP2016]], 1 +// CHECK-NEXT: br i1 [[TMP2018]], label [[IX_ATOMIC_EXIT195:%.*]], label [[IX_ATOMIC_CONT196:%.*]] +// CHECK: ix.atomic.cont196: +// CHECK-NEXT: store i32 [[TMP2017]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT195]] +// CHECK: ix.atomic.exit195: +// CHECK-NEXT: [[TMP2019:%.*]] = extractvalue { i32, i1 } [[TMP2016]], 1 +// CHECK-NEXT: [[TMP2020:%.*]] = zext i1 [[TMP2019]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2020]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2021:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2022:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2023:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2021]], i32 [[TMP2022]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2024:%.*]] = extractvalue { i32, i1 } [[TMP2023]], 0 +// CHECK-NEXT: [[TMP2025:%.*]] = extractvalue { i32, i1 } [[TMP2023]], 1 +// 
CHECK-NEXT: br i1 [[TMP2025]], label [[IX_ATOMIC_EXIT197:%.*]], label [[IX_ATOMIC_CONT198:%.*]] +// CHECK: ix.atomic.cont198: +// CHECK-NEXT: store i32 [[TMP2024]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT197]] +// CHECK: ix.atomic.exit197: +// CHECK-NEXT: [[TMP2026:%.*]] = extractvalue { i32, i1 } [[TMP2023]], 1 +// CHECK-NEXT: [[TMP2027:%.*]] = zext i1 [[TMP2026]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2027]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2028:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2029:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2028]] acquire, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2029]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2030:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2031:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2030]] acquire, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2031]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2032:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2033:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2032]] acquire, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2033]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2034:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2035:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2034]] acquire, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2035]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2036:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2037:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2038:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2036]], i32 [[TMP2037]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2039:%.*]] = extractvalue { i32, i1 } [[TMP2038]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2039]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2040:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2041:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2042:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2040]], i32 [[TMP2041]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2043:%.*]] = extractvalue { i32, i1 } [[TMP2042]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2043]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2044:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2045:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2044]] acquire, align 4 +// CHECK-NEXT: [[TMP2046:%.*]] = icmp ugt i32 [[TMP2045]], [[TMP2044]] +// CHECK-NEXT: [[TMP2047:%.*]] = select i1 [[TMP2046]], i32 [[TMP2044]], i32 [[TMP2045]] +// CHECK-NEXT: store volatile i32 [[TMP2047]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2048:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2049:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2048]] acquire, align 4 +// CHECK-NEXT: [[TMP2050:%.*]] = icmp ult i32 [[TMP2049]], [[TMP2048]] +// CHECK-NEXT: [[TMP2051:%.*]] = select i1 [[TMP2050]], i32 [[TMP2048]], i32 [[TMP2049]] +// CHECK-NEXT: store volatile i32 [[TMP2051]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2052:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2053:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2052]] acquire, align 4 +// CHECK-NEXT: [[TMP2054:%.*]] = icmp ult i32 [[TMP2053]], [[TMP2052]] +// CHECK-NEXT: [[TMP2055:%.*]] = select i1 [[TMP2054]], i32 [[TMP2052]], i32 [[TMP2053]] +// CHECK-NEXT: store volatile i32 [[TMP2055]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2056:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2057:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2056]] acquire, align 
4 +// CHECK-NEXT: [[TMP2058:%.*]] = icmp ugt i32 [[TMP2057]], [[TMP2056]] +// CHECK-NEXT: [[TMP2059:%.*]] = select i1 [[TMP2058]], i32 [[TMP2056]], i32 [[TMP2057]] +// CHECK-NEXT: store volatile i32 [[TMP2059]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2060:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2061:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2062:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2060]], i32 [[TMP2061]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2063:%.*]] = extractvalue { i32, i1 } [[TMP2062]], 0 +// CHECK-NEXT: [[TMP2064:%.*]] = extractvalue { i32, i1 } [[TMP2062]], 1 +// CHECK-NEXT: [[TMP2065:%.*]] = select i1 [[TMP2064]], i32 [[TMP2060]], i32 [[TMP2063]] +// CHECK-NEXT: store volatile i32 [[TMP2065]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2066:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2067:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2068:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2066]], i32 [[TMP2067]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2069:%.*]] = extractvalue { i32, i1 } [[TMP2068]], 0 +// CHECK-NEXT: [[TMP2070:%.*]] = extractvalue { i32, i1 } [[TMP2068]], 1 +// CHECK-NEXT: [[TMP2071:%.*]] = select i1 [[TMP2070]], i32 [[TMP2066]], i32 [[TMP2069]] +// CHECK-NEXT: store volatile i32 [[TMP2071]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2072:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2073:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2074:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2072]], i32 [[TMP2073]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2075:%.*]] = extractvalue { i32, i1 } [[TMP2074]], 0 +// CHECK-NEXT: [[TMP2076:%.*]] = extractvalue { i32, i1 } [[TMP2074]], 1 +// CHECK-NEXT: br i1 [[TMP2076]], label [[IX_ATOMIC_EXIT199:%.*]], label [[IX_ATOMIC_CONT200:%.*]] +// CHECK: ix.atomic.cont200: +// CHECK-NEXT: store i32 [[TMP2075]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT199]] +// CHECK: ix.atomic.exit199: +// CHECK-NEXT: [[TMP2077:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2078:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2079:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2077]], i32 [[TMP2078]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2080:%.*]] = extractvalue { i32, i1 } [[TMP2079]], 0 +// CHECK-NEXT: [[TMP2081:%.*]] = extractvalue { i32, i1 } [[TMP2079]], 1 +// CHECK-NEXT: br i1 [[TMP2081]], label [[IX_ATOMIC_EXIT201:%.*]], label [[IX_ATOMIC_CONT202:%.*]] +// CHECK: ix.atomic.cont202: +// CHECK-NEXT: store i32 [[TMP2080]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT201]] +// CHECK: ix.atomic.exit201: +// CHECK-NEXT: [[TMP2082:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2083:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2084:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2082]], i32 [[TMP2083]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2085:%.*]] = extractvalue { i32, i1 } [[TMP2084]], 1 +// CHECK-NEXT: [[TMP2086:%.*]] = zext i1 [[TMP2085]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2086]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP2087:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2088:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2089:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2087]], i32 [[TMP2088]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2090:%.*]] = extractvalue { i32, i1 } [[TMP2089]], 1 +// CHECK-NEXT: [[TMP2091:%.*]] = zext i1 [[TMP2090]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2091]], i32* [[IR]], align 4 +// 
CHECK-NEXT: [[TMP2092:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2093:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2094:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2092]], i32 [[TMP2093]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2095:%.*]] = extractvalue { i32, i1 } [[TMP2094]], 0 +// CHECK-NEXT: [[TMP2096:%.*]] = extractvalue { i32, i1 } [[TMP2094]], 1 +// CHECK-NEXT: br i1 [[TMP2096]], label [[IX_ATOMIC_EXIT203:%.*]], label [[IX_ATOMIC_CONT204:%.*]] +// CHECK: ix.atomic.cont204: +// CHECK-NEXT: store i32 [[TMP2095]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT203]] +// CHECK: ix.atomic.exit203: +// CHECK-NEXT: [[TMP2097:%.*]] = extractvalue { i32, i1 } [[TMP2094]], 1 +// CHECK-NEXT: [[TMP2098:%.*]] = zext i1 [[TMP2097]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2098]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP2099:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2100:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2101:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2099]], i32 [[TMP2100]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2102:%.*]] = extractvalue { i32, i1 } [[TMP2101]], 0 +// CHECK-NEXT: [[TMP2103:%.*]] = extractvalue { i32, i1 } [[TMP2101]], 1 +// CHECK-NEXT: br i1 [[TMP2103]], label [[IX_ATOMIC_EXIT205:%.*]], label [[IX_ATOMIC_CONT206:%.*]] +// CHECK: ix.atomic.cont206: +// CHECK-NEXT: store i32 [[TMP2102]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT205]] +// CHECK: ix.atomic.exit205: +// CHECK-NEXT: [[TMP2104:%.*]] = extractvalue { i32, i1 } [[TMP2101]], 1 +// CHECK-NEXT: [[TMP2105:%.*]] = zext i1 [[TMP2104]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2105]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP2106:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2107:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2106]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2107]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2108:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2109:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2108]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2109]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2110:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2111:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2110]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2111]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2112:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2113:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2112]] monotonic, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2113]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2114:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2115:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2116:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2114]], i32 [[TMP2115]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2117:%.*]] = extractvalue { i32, i1 } [[TMP2116]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2117]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2118:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2119:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2120:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2118]], i32 [[TMP2119]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2121:%.*]] = extractvalue { i32, i1 } [[TMP2120]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2121]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2122:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2123:%.*]] = atomicrmw umax i32* [[IX]], i32 
[[TMP2122]] monotonic, align 4 +// CHECK-NEXT: [[TMP2124:%.*]] = icmp ugt i32 [[TMP2123]], [[TMP2122]] +// CHECK-NEXT: [[TMP2125:%.*]] = select i1 [[TMP2124]], i32 [[TMP2122]], i32 [[TMP2123]] +// CHECK-NEXT: store volatile i32 [[TMP2125]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2126:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2127:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2126]] monotonic, align 4 +// CHECK-NEXT: [[TMP2128:%.*]] = icmp ult i32 [[TMP2127]], [[TMP2126]] +// CHECK-NEXT: [[TMP2129:%.*]] = select i1 [[TMP2128]], i32 [[TMP2126]], i32 [[TMP2127]] +// CHECK-NEXT: store volatile i32 [[TMP2129]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2130:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2131:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2130]] monotonic, align 4 +// CHECK-NEXT: [[TMP2132:%.*]] = icmp ult i32 [[TMP2131]], [[TMP2130]] +// CHECK-NEXT: [[TMP2133:%.*]] = select i1 [[TMP2132]], i32 [[TMP2130]], i32 [[TMP2131]] +// CHECK-NEXT: store volatile i32 [[TMP2133]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2134:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2135:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2134]] monotonic, align 4 +// CHECK-NEXT: [[TMP2136:%.*]] = icmp ugt i32 [[TMP2135]], [[TMP2134]] +// CHECK-NEXT: [[TMP2137:%.*]] = select i1 [[TMP2136]], i32 [[TMP2134]], i32 [[TMP2135]] +// CHECK-NEXT: store volatile i32 [[TMP2137]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2138:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2139:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2140:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2138]], i32 [[TMP2139]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2141:%.*]] = extractvalue { i32, i1 } [[TMP2140]], 0 +// CHECK-NEXT: [[TMP2142:%.*]] = extractvalue { i32, i1 } [[TMP2140]], 1 +// CHECK-NEXT: [[TMP2143:%.*]] = select i1 [[TMP2142]], i32 [[TMP2138]], i32 [[TMP2141]] +// CHECK-NEXT: store volatile i32 [[TMP2143]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2144:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2145:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2146:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2144]], i32 [[TMP2145]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2147:%.*]] = extractvalue { i32, i1 } [[TMP2146]], 0 +// CHECK-NEXT: [[TMP2148:%.*]] = extractvalue { i32, i1 } [[TMP2146]], 1 +// CHECK-NEXT: [[TMP2149:%.*]] = select i1 [[TMP2148]], i32 [[TMP2144]], i32 [[TMP2147]] +// CHECK-NEXT: store volatile i32 [[TMP2149]], i32* [[IV]], align 4 +// CHECK-NEXT: [[TMP2150:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2151:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2152:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2150]], i32 [[TMP2151]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2153:%.*]] = extractvalue { i32, i1 } [[TMP2152]], 0 +// CHECK-NEXT: [[TMP2154:%.*]] = extractvalue { i32, i1 } [[TMP2152]], 1 +// CHECK-NEXT: br i1 [[TMP2154]], label [[IX_ATOMIC_EXIT207:%.*]], label [[IX_ATOMIC_CONT208:%.*]] +// CHECK: ix.atomic.cont208: +// CHECK-NEXT: store i32 [[TMP2153]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT207]] +// CHECK: ix.atomic.exit207: +// CHECK-NEXT: [[TMP2155:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2156:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2157:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2155]], i32 [[TMP2156]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2158:%.*]] = extractvalue { i32, i1 } [[TMP2157]], 0 +// CHECK-NEXT: 
[[TMP2159:%.*]] = extractvalue { i32, i1 } [[TMP2157]], 1 +// CHECK-NEXT: br i1 [[TMP2159]], label [[IX_ATOMIC_EXIT209:%.*]], label [[IX_ATOMIC_CONT210:%.*]] +// CHECK: ix.atomic.cont210: +// CHECK-NEXT: store i32 [[TMP2158]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT209]] +// CHECK: ix.atomic.exit209: +// CHECK-NEXT: [[TMP2160:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2161:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2162:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2160]], i32 [[TMP2161]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2163:%.*]] = extractvalue { i32, i1 } [[TMP2162]], 1 +// CHECK-NEXT: [[TMP2164:%.*]] = zext i1 [[TMP2163]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2164]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP2165:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2166:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2167:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2165]], i32 [[TMP2166]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2168:%.*]] = extractvalue { i32, i1 } [[TMP2167]], 1 +// CHECK-NEXT: [[TMP2169:%.*]] = zext i1 [[TMP2168]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2169]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP2170:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2171:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2172:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2170]], i32 [[TMP2171]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2173:%.*]] = extractvalue { i32, i1 } [[TMP2172]], 0 +// CHECK-NEXT: [[TMP2174:%.*]] = extractvalue { i32, i1 } [[TMP2172]], 1 +// CHECK-NEXT: br i1 [[TMP2174]], label [[IX_ATOMIC_EXIT211:%.*]], label [[IX_ATOMIC_CONT212:%.*]] +// CHECK: ix.atomic.cont212: +// CHECK-NEXT: store i32 [[TMP2173]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT211]] +// CHECK: ix.atomic.exit211: +// CHECK-NEXT: [[TMP2175:%.*]] = extractvalue { i32, i1 } [[TMP2172]], 1 +// CHECK-NEXT: [[TMP2176:%.*]] = zext i1 [[TMP2175]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2176]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP2177:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2178:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2179:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2177]], i32 [[TMP2178]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2180:%.*]] = extractvalue { i32, i1 } [[TMP2179]], 0 +// CHECK-NEXT: [[TMP2181:%.*]] = extractvalue { i32, i1 } [[TMP2179]], 1 +// CHECK-NEXT: br i1 [[TMP2181]], label [[IX_ATOMIC_EXIT213:%.*]], label [[IX_ATOMIC_CONT214:%.*]] +// CHECK: ix.atomic.cont214: +// CHECK-NEXT: store i32 [[TMP2180]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT213]] +// CHECK: ix.atomic.exit213: +// CHECK-NEXT: [[TMP2182:%.*]] = extractvalue { i32, i1 } [[TMP2179]], 1 +// CHECK-NEXT: [[TMP2183:%.*]] = zext i1 [[TMP2182]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2183]], i32* [[IR]], align 4 +// CHECK-NEXT: [[TMP2184:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2185:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2184]] release, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2185]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2186:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2187:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2186]] release, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2187]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) 
+// CHECK-NEXT: [[TMP2188:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2189:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2188]] release, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2189]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2190:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2191:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2190]] release, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2191]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2192:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2193:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2194:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2192]], i32 [[TMP2193]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2195:%.*]] = extractvalue { i32, i1 } [[TMP2194]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2195]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2196:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2197:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2198:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2196]], i32 [[TMP2197]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2199:%.*]] = extractvalue { i32, i1 } [[TMP2198]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2199]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2200:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2201:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2200]] release, align 4 +// CHECK-NEXT: [[TMP2202:%.*]] = icmp ugt i32 [[TMP2201]], [[TMP2200]] +// CHECK-NEXT: [[TMP2203:%.*]] = select i1 [[TMP2202]], i32 [[TMP2200]], i32 [[TMP2201]] +// CHECK-NEXT: store volatile i32 [[TMP2203]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2204:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2205:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2204]] release, align 4 +// CHECK-NEXT: [[TMP2206:%.*]] = icmp ult i32 [[TMP2205]], [[TMP2204]] +// CHECK-NEXT: [[TMP2207:%.*]] = select i1 [[TMP2206]], i32 [[TMP2204]], i32 [[TMP2205]] +// CHECK-NEXT: store volatile i32 [[TMP2207]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2208:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2209:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2208]] release, align 4 +// CHECK-NEXT: [[TMP2210:%.*]] = icmp ult i32 [[TMP2209]], [[TMP2208]] +// CHECK-NEXT: [[TMP2211:%.*]] = select i1 [[TMP2210]], i32 [[TMP2208]], i32 [[TMP2209]] +// CHECK-NEXT: store volatile i32 [[TMP2211]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2212:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2213:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2212]] release, align 4 +// CHECK-NEXT: [[TMP2214:%.*]] = icmp ugt i32 [[TMP2213]], [[TMP2212]] +// CHECK-NEXT: [[TMP2215:%.*]] = select i1 [[TMP2214]], i32 [[TMP2212]], i32 [[TMP2213]] +// CHECK-NEXT: store volatile i32 [[TMP2215]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2216:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2217:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2218:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2216]], 
i32 [[TMP2217]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2219:%.*]] = extractvalue { i32, i1 } [[TMP2218]], 0 +// CHECK-NEXT: [[TMP2220:%.*]] = extractvalue { i32, i1 } [[TMP2218]], 1 +// CHECK-NEXT: [[TMP2221:%.*]] = select i1 [[TMP2220]], i32 [[TMP2216]], i32 [[TMP2219]] +// CHECK-NEXT: store volatile i32 [[TMP2221]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2222:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2223:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2224:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2222]], i32 [[TMP2223]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2225:%.*]] = extractvalue { i32, i1 } [[TMP2224]], 0 +// CHECK-NEXT: [[TMP2226:%.*]] = extractvalue { i32, i1 } [[TMP2224]], 1 +// CHECK-NEXT: [[TMP2227:%.*]] = select i1 [[TMP2226]], i32 [[TMP2222]], i32 [[TMP2225]] +// CHECK-NEXT: store volatile i32 [[TMP2227]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2228:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2229:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2230:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2228]], i32 [[TMP2229]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2231:%.*]] = extractvalue { i32, i1 } [[TMP2230]], 0 +// CHECK-NEXT: [[TMP2232:%.*]] = extractvalue { i32, i1 } [[TMP2230]], 1 +// CHECK-NEXT: br i1 [[TMP2232]], label [[IX_ATOMIC_EXIT215:%.*]], label [[IX_ATOMIC_CONT216:%.*]] +// CHECK: ix.atomic.cont216: +// CHECK-NEXT: store i32 [[TMP2231]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT215]] +// CHECK: ix.atomic.exit215: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2233:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2234:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2235:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2233]], i32 [[TMP2234]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2236:%.*]] = extractvalue { i32, i1 } [[TMP2235]], 0 +// CHECK-NEXT: [[TMP2237:%.*]] = extractvalue { i32, i1 } [[TMP2235]], 1 +// CHECK-NEXT: br i1 [[TMP2237]], label [[IX_ATOMIC_EXIT217:%.*]], label [[IX_ATOMIC_CONT218:%.*]] +// CHECK: ix.atomic.cont218: +// CHECK-NEXT: store i32 [[TMP2236]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT217]] +// CHECK: ix.atomic.exit217: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2238:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2239:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2240:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2238]], i32 [[TMP2239]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2241:%.*]] = extractvalue { i32, i1 } [[TMP2240]], 1 +// CHECK-NEXT: [[TMP2242:%.*]] = zext i1 [[TMP2241]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2242]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2243:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2244:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2245:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2243]], i32 [[TMP2244]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2246:%.*]] = extractvalue { i32, i1 } [[TMP2245]], 1 +// CHECK-NEXT: [[TMP2247:%.*]] = zext i1 [[TMP2246]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2247]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: 
[[TMP2248:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2249:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2250:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2248]], i32 [[TMP2249]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2251:%.*]] = extractvalue { i32, i1 } [[TMP2250]], 0 +// CHECK-NEXT: [[TMP2252:%.*]] = extractvalue { i32, i1 } [[TMP2250]], 1 +// CHECK-NEXT: br i1 [[TMP2252]], label [[IX_ATOMIC_EXIT219:%.*]], label [[IX_ATOMIC_CONT220:%.*]] +// CHECK: ix.atomic.cont220: +// CHECK-NEXT: store i32 [[TMP2251]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT219]] +// CHECK: ix.atomic.exit219: +// CHECK-NEXT: [[TMP2253:%.*]] = extractvalue { i32, i1 } [[TMP2250]], 1 +// CHECK-NEXT: [[TMP2254:%.*]] = zext i1 [[TMP2253]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2254]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2255:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2256:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2257:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2255]], i32 [[TMP2256]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2258:%.*]] = extractvalue { i32, i1 } [[TMP2257]], 0 +// CHECK-NEXT: [[TMP2259:%.*]] = extractvalue { i32, i1 } [[TMP2257]], 1 +// CHECK-NEXT: br i1 [[TMP2259]], label [[IX_ATOMIC_EXIT221:%.*]], label [[IX_ATOMIC_CONT222:%.*]] +// CHECK: ix.atomic.cont222: +// CHECK-NEXT: store i32 [[TMP2258]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT221]] +// CHECK: ix.atomic.exit221: +// CHECK-NEXT: [[TMP2260:%.*]] = extractvalue { i32, i1 } [[TMP2257]], 1 +// CHECK-NEXT: [[TMP2261:%.*]] = zext i1 [[TMP2260]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2261]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2262:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2263:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2262]] seq_cst, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2263]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2264:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2265:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2264]] seq_cst, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2265]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2266:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2267:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2266]] seq_cst, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2267]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2268:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2269:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2268]] seq_cst, align 4 +// CHECK-NEXT: store volatile i32 [[TMP2269]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2270:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2271:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2272:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2270]], i32 [[TMP2271]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2273:%.*]] = extractvalue { i32, i1 } [[TMP2272]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2273]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2274:%.*]] = load i32, i32* [[IE]], 
align 4 +// CHECK-NEXT: [[TMP2275:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2276:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2274]], i32 [[TMP2275]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2277:%.*]] = extractvalue { i32, i1 } [[TMP2276]], 0 +// CHECK-NEXT: store volatile i32 [[TMP2277]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2278:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2279:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2278]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2280:%.*]] = icmp ugt i32 [[TMP2279]], [[TMP2278]] +// CHECK-NEXT: [[TMP2281:%.*]] = select i1 [[TMP2280]], i32 [[TMP2278]], i32 [[TMP2279]] +// CHECK-NEXT: store volatile i32 [[TMP2281]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2282:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2283:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2282]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2284:%.*]] = icmp ult i32 [[TMP2283]], [[TMP2282]] +// CHECK-NEXT: [[TMP2285:%.*]] = select i1 [[TMP2284]], i32 [[TMP2282]], i32 [[TMP2283]] +// CHECK-NEXT: store volatile i32 [[TMP2285]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2286:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2287:%.*]] = atomicrmw umin i32* [[IX]], i32 [[TMP2286]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2288:%.*]] = icmp ult i32 [[TMP2287]], [[TMP2286]] +// CHECK-NEXT: [[TMP2289:%.*]] = select i1 [[TMP2288]], i32 [[TMP2286]], i32 [[TMP2287]] +// CHECK-NEXT: store volatile i32 [[TMP2289]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2290:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2291:%.*]] = atomicrmw umax i32* [[IX]], i32 [[TMP2290]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2292:%.*]] = icmp ugt i32 [[TMP2291]], [[TMP2290]] +// CHECK-NEXT: [[TMP2293:%.*]] = select i1 [[TMP2292]], i32 [[TMP2290]], i32 [[TMP2291]] +// CHECK-NEXT: store volatile i32 [[TMP2293]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2294:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2295:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2296:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2294]], i32 [[TMP2295]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2297:%.*]] = extractvalue { i32, i1 } [[TMP2296]], 0 +// CHECK-NEXT: [[TMP2298:%.*]] = extractvalue { i32, i1 } [[TMP2296]], 1 +// CHECK-NEXT: [[TMP2299:%.*]] = select i1 [[TMP2298]], i32 [[TMP2294]], i32 [[TMP2297]] +// CHECK-NEXT: store volatile i32 [[TMP2299]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2300:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2301:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2302:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2300]], i32 [[TMP2301]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2303:%.*]] = extractvalue { i32, i1 } [[TMP2302]], 0 +// CHECK-NEXT: [[TMP2304:%.*]] = extractvalue { i32, i1 } [[TMP2302]], 1 +// CHECK-NEXT: [[TMP2305:%.*]] = select i1 [[TMP2304]], i32 [[TMP2300]], i32 [[TMP2303]] +// CHECK-NEXT: store volatile i32 [[TMP2305]], i32* [[IV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2306:%.*]] = load i32, i32* [[IE]], align 4 +// 
CHECK-NEXT: [[TMP2307:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2308:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2306]], i32 [[TMP2307]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2309:%.*]] = extractvalue { i32, i1 } [[TMP2308]], 0 +// CHECK-NEXT: [[TMP2310:%.*]] = extractvalue { i32, i1 } [[TMP2308]], 1 +// CHECK-NEXT: br i1 [[TMP2310]], label [[IX_ATOMIC_EXIT223:%.*]], label [[IX_ATOMIC_CONT224:%.*]] +// CHECK: ix.atomic.cont224: +// CHECK-NEXT: store i32 [[TMP2309]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT223]] +// CHECK: ix.atomic.exit223: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2311:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2312:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2313:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2311]], i32 [[TMP2312]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2314:%.*]] = extractvalue { i32, i1 } [[TMP2313]], 0 +// CHECK-NEXT: [[TMP2315:%.*]] = extractvalue { i32, i1 } [[TMP2313]], 1 +// CHECK-NEXT: br i1 [[TMP2315]], label [[IX_ATOMIC_EXIT225:%.*]], label [[IX_ATOMIC_CONT226:%.*]] +// CHECK: ix.atomic.cont226: +// CHECK-NEXT: store i32 [[TMP2314]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT225]] +// CHECK: ix.atomic.exit225: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2316:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2317:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2318:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2316]], i32 [[TMP2317]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2319:%.*]] = extractvalue { i32, i1 } [[TMP2318]], 1 +// CHECK-NEXT: [[TMP2320:%.*]] = zext i1 [[TMP2319]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2320]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2321:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2322:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2323:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2321]], i32 [[TMP2322]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2324:%.*]] = extractvalue { i32, i1 } [[TMP2323]], 1 +// CHECK-NEXT: [[TMP2325:%.*]] = zext i1 [[TMP2324]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2325]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2326:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2327:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2328:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2326]], i32 [[TMP2327]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2329:%.*]] = extractvalue { i32, i1 } [[TMP2328]], 0 +// CHECK-NEXT: [[TMP2330:%.*]] = extractvalue { i32, i1 } [[TMP2328]], 1 +// CHECK-NEXT: br i1 [[TMP2330]], label [[IX_ATOMIC_EXIT227:%.*]], label [[IX_ATOMIC_CONT228:%.*]] +// CHECK: ix.atomic.cont228: +// CHECK-NEXT: store i32 [[TMP2329]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT227]] +// CHECK: ix.atomic.exit227: +// CHECK-NEXT: [[TMP2331:%.*]] = extractvalue { i32, i1 } [[TMP2328]], 1 +// CHECK-NEXT: [[TMP2332:%.*]] = zext i1 [[TMP2331]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2332]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2333:%.*]] = load i32, i32* [[IE]], align 4 +// CHECK-NEXT: [[TMP2334:%.*]] = load i32, i32* [[ID]], align 4 +// CHECK-NEXT: [[TMP2335:%.*]] = cmpxchg i32* [[IX]], i32 [[TMP2333]], i32 
[[TMP2334]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2336:%.*]] = extractvalue { i32, i1 } [[TMP2335]], 0 +// CHECK-NEXT: [[TMP2337:%.*]] = extractvalue { i32, i1 } [[TMP2335]], 1 +// CHECK-NEXT: br i1 [[TMP2337]], label [[IX_ATOMIC_EXIT229:%.*]], label [[IX_ATOMIC_CONT230:%.*]] +// CHECK: ix.atomic.cont230: +// CHECK-NEXT: store i32 [[TMP2336]], i32* [[IV]], align 4 +// CHECK-NEXT: br label [[IX_ATOMIC_EXIT229]] +// CHECK: ix.atomic.exit229: +// CHECK-NEXT: [[TMP2338:%.*]] = extractvalue { i32, i1 } [[TMP2335]], 1 +// CHECK-NEXT: [[TMP2339:%.*]] = zext i1 [[TMP2338]] to i32 +// CHECK-NEXT: store volatile i32 [[TMP2339]], i32* [[IR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2340:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2341:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2340]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2341]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2342:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2343:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2342]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2343]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2344:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2345:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2344]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2345]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2346:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2347:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2346]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2347]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2348:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2349:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2350:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2348]], i32 [[TMP2349]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2351:%.*]] = extractvalue { i32, i1 } [[TMP2350]], 0 +// CHECK-NEXT: store i32 [[TMP2351]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2352:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2353:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2354:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2352]], i32 [[TMP2353]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2355:%.*]] = extractvalue { i32, i1 } [[TMP2354]], 0 +// CHECK-NEXT: store i32 [[TMP2355]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2356:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2357:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2356]] monotonic, align 4 +// CHECK-NEXT: [[TMP2358:%.*]] = icmp ugt i32 [[TMP2357]], [[TMP2356]] +// CHECK-NEXT: [[TMP2359:%.*]] = select i1 [[TMP2358]], i32 [[TMP2356]], i32 [[TMP2357]] +// CHECK-NEXT: store i32 [[TMP2359]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2360:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2361:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2360]] monotonic, align 4 +// CHECK-NEXT: [[TMP2362:%.*]] = icmp ult i32 [[TMP2361]], [[TMP2360]] +// CHECK-NEXT: [[TMP2363:%.*]] = select i1 [[TMP2362]], i32 [[TMP2360]], i32 [[TMP2361]] +// CHECK-NEXT: store i32 [[TMP2363]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2364:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2365:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2364]] monotonic, align 4 +// CHECK-NEXT: [[TMP2366:%.*]] = icmp ult i32 [[TMP2365]], [[TMP2364]] +// CHECK-NEXT: [[TMP2367:%.*]] = select i1 [[TMP2366]], i32 [[TMP2364]], i32 [[TMP2365]] +// CHECK-NEXT: store i32 [[TMP2367]], i32* [[UIV]], align 
4 +// CHECK-NEXT: [[TMP2368:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2369:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2368]] monotonic, align 4 +// CHECK-NEXT: [[TMP2370:%.*]] = icmp ugt i32 [[TMP2369]], [[TMP2368]] +// CHECK-NEXT: [[TMP2371:%.*]] = select i1 [[TMP2370]], i32 [[TMP2368]], i32 [[TMP2369]] +// CHECK-NEXT: store i32 [[TMP2371]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2372:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2373:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2374:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2372]], i32 [[TMP2373]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2375:%.*]] = extractvalue { i32, i1 } [[TMP2374]], 0 +// CHECK-NEXT: [[TMP2376:%.*]] = extractvalue { i32, i1 } [[TMP2374]], 1 +// CHECK-NEXT: [[TMP2377:%.*]] = select i1 [[TMP2376]], i32 [[TMP2372]], i32 [[TMP2375]] +// CHECK-NEXT: store i32 [[TMP2377]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2378:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2379:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2380:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2378]], i32 [[TMP2379]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2381:%.*]] = extractvalue { i32, i1 } [[TMP2380]], 0 +// CHECK-NEXT: [[TMP2382:%.*]] = extractvalue { i32, i1 } [[TMP2380]], 1 +// CHECK-NEXT: [[TMP2383:%.*]] = select i1 [[TMP2382]], i32 [[TMP2378]], i32 [[TMP2381]] +// CHECK-NEXT: store i32 [[TMP2383]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2384:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2385:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2386:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2384]], i32 [[TMP2385]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2387:%.*]] = extractvalue { i32, i1 } [[TMP2386]], 0 +// CHECK-NEXT: [[TMP2388:%.*]] = extractvalue { i32, i1 } [[TMP2386]], 1 +// CHECK-NEXT: br i1 [[TMP2388]], label [[UIX_ATOMIC_EXIT:%.*]], label [[UIX_ATOMIC_CONT:%.*]] +// CHECK: uix.atomic.cont: +// CHECK-NEXT: store i32 [[TMP2387]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT]] +// CHECK: uix.atomic.exit: +// CHECK-NEXT: [[TMP2389:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2390:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2391:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2389]], i32 [[TMP2390]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2392:%.*]] = extractvalue { i32, i1 } [[TMP2391]], 0 +// CHECK-NEXT: [[TMP2393:%.*]] = extractvalue { i32, i1 } [[TMP2391]], 1 +// CHECK-NEXT: br i1 [[TMP2393]], label [[UIX_ATOMIC_EXIT231:%.*]], label [[UIX_ATOMIC_CONT232:%.*]] +// CHECK: uix.atomic.cont232: +// CHECK-NEXT: store i32 [[TMP2392]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT231]] +// CHECK: uix.atomic.exit231: +// CHECK-NEXT: [[TMP2394:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2395:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2396:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2394]], i32 [[TMP2395]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2397:%.*]] = extractvalue { i32, i1 } [[TMP2396]], 1 +// CHECK-NEXT: [[TMP2398:%.*]] = zext i1 [[TMP2397]] to i32 +// CHECK-NEXT: store i32 [[TMP2398]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2399:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2400:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2401:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2399]], i32 [[TMP2400]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2402:%.*]] = 
extractvalue { i32, i1 } [[TMP2401]], 1 +// CHECK-NEXT: [[TMP2403:%.*]] = zext i1 [[TMP2402]] to i32 +// CHECK-NEXT: store i32 [[TMP2403]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2404:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2405:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2406:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2404]], i32 [[TMP2405]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2407:%.*]] = extractvalue { i32, i1 } [[TMP2406]], 0 +// CHECK-NEXT: [[TMP2408:%.*]] = extractvalue { i32, i1 } [[TMP2406]], 1 +// CHECK-NEXT: br i1 [[TMP2408]], label [[UIX_ATOMIC_EXIT233:%.*]], label [[UIX_ATOMIC_CONT234:%.*]] +// CHECK: uix.atomic.cont234: +// CHECK-NEXT: store i32 [[TMP2407]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT233]] +// CHECK: uix.atomic.exit233: +// CHECK-NEXT: [[TMP2409:%.*]] = extractvalue { i32, i1 } [[TMP2406]], 1 +// CHECK-NEXT: [[TMP2410:%.*]] = zext i1 [[TMP2409]] to i32 +// CHECK-NEXT: store i32 [[TMP2410]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2411:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2412:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2413:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2411]], i32 [[TMP2412]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2414:%.*]] = extractvalue { i32, i1 } [[TMP2413]], 0 +// CHECK-NEXT: [[TMP2415:%.*]] = extractvalue { i32, i1 } [[TMP2413]], 1 +// CHECK-NEXT: br i1 [[TMP2415]], label [[UIX_ATOMIC_EXIT235:%.*]], label [[UIX_ATOMIC_CONT236:%.*]] +// CHECK: uix.atomic.cont236: +// CHECK-NEXT: store i32 [[TMP2414]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT235]] +// CHECK: uix.atomic.exit235: +// CHECK-NEXT: [[TMP2416:%.*]] = extractvalue { i32, i1 } [[TMP2413]], 1 +// CHECK-NEXT: [[TMP2417:%.*]] = zext i1 [[TMP2416]] to i32 +// CHECK-NEXT: store i32 [[TMP2417]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2418:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2419:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2418]] acq_rel, align 4 +// CHECK-NEXT: store i32 [[TMP2419]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2420:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2421:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2420]] acq_rel, align 4 +// CHECK-NEXT: store i32 [[TMP2421]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2422:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2423:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2422]] acq_rel, align 4 +// CHECK-NEXT: store i32 [[TMP2423]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2424:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2425:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2424]] acq_rel, align 4 +// CHECK-NEXT: store i32 [[TMP2425]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2426:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2427:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2428:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2426]], i32 [[TMP2427]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2429:%.*]] = extractvalue { i32, i1 } [[TMP2428]], 0 +// CHECK-NEXT: store i32 [[TMP2429]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2430:%.*]] = load 
i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2431:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2432:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2430]], i32 [[TMP2431]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2433:%.*]] = extractvalue { i32, i1 } [[TMP2432]], 0 +// CHECK-NEXT: store i32 [[TMP2433]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2434:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2435:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2434]] acq_rel, align 4 +// CHECK-NEXT: [[TMP2436:%.*]] = icmp ugt i32 [[TMP2435]], [[TMP2434]] +// CHECK-NEXT: [[TMP2437:%.*]] = select i1 [[TMP2436]], i32 [[TMP2434]], i32 [[TMP2435]] +// CHECK-NEXT: store i32 [[TMP2437]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2438:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2439:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2438]] acq_rel, align 4 +// CHECK-NEXT: [[TMP2440:%.*]] = icmp ult i32 [[TMP2439]], [[TMP2438]] +// CHECK-NEXT: [[TMP2441:%.*]] = select i1 [[TMP2440]], i32 [[TMP2438]], i32 [[TMP2439]] +// CHECK-NEXT: store i32 [[TMP2441]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2442:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2443:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2442]] acq_rel, align 4 +// CHECK-NEXT: [[TMP2444:%.*]] = icmp ult i32 [[TMP2443]], [[TMP2442]] +// CHECK-NEXT: [[TMP2445:%.*]] = select i1 [[TMP2444]], i32 [[TMP2442]], i32 [[TMP2443]] +// CHECK-NEXT: store i32 [[TMP2445]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2446:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2447:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2446]] acq_rel, align 4 +// CHECK-NEXT: [[TMP2448:%.*]] = icmp ugt i32 [[TMP2447]], [[TMP2446]] +// CHECK-NEXT: [[TMP2449:%.*]] = select i1 [[TMP2448]], i32 [[TMP2446]], i32 [[TMP2447]] +// CHECK-NEXT: store i32 [[TMP2449]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2450:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2451:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2452:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2450]], i32 [[TMP2451]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2453:%.*]] = extractvalue { i32, i1 } [[TMP2452]], 0 +// CHECK-NEXT: [[TMP2454:%.*]] = extractvalue { i32, i1 } [[TMP2452]], 1 +// CHECK-NEXT: [[TMP2455:%.*]] = select i1 [[TMP2454]], i32 [[TMP2450]], i32 [[TMP2453]] +// CHECK-NEXT: store i32 [[TMP2455]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2456:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2457:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2458:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2456]], i32 [[TMP2457]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2459:%.*]] = extractvalue { i32, i1 } [[TMP2458]], 0 +// CHECK-NEXT: [[TMP2460:%.*]] = extractvalue { i32, i1 } [[TMP2458]], 1 +// CHECK-NEXT: [[TMP2461:%.*]] = select i1 [[TMP2460]], i32 [[TMP2456]], i32 [[TMP2459]] +// CHECK-NEXT: store i32 [[TMP2461]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2462:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2463:%.*]] = 
load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2464:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2462]], i32 [[TMP2463]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2465:%.*]] = extractvalue { i32, i1 } [[TMP2464]], 0 +// CHECK-NEXT: [[TMP2466:%.*]] = extractvalue { i32, i1 } [[TMP2464]], 1 +// CHECK-NEXT: br i1 [[TMP2466]], label [[UIX_ATOMIC_EXIT237:%.*]], label [[UIX_ATOMIC_CONT238:%.*]] +// CHECK: uix.atomic.cont238: +// CHECK-NEXT: store i32 [[TMP2465]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT237]] +// CHECK: uix.atomic.exit237: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2467:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2468:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2469:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2467]], i32 [[TMP2468]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2470:%.*]] = extractvalue { i32, i1 } [[TMP2469]], 0 +// CHECK-NEXT: [[TMP2471:%.*]] = extractvalue { i32, i1 } [[TMP2469]], 1 +// CHECK-NEXT: br i1 [[TMP2471]], label [[UIX_ATOMIC_EXIT239:%.*]], label [[UIX_ATOMIC_CONT240:%.*]] +// CHECK: uix.atomic.cont240: +// CHECK-NEXT: store i32 [[TMP2470]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT239]] +// CHECK: uix.atomic.exit239: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2472:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2473:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2474:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2472]], i32 [[TMP2473]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2475:%.*]] = extractvalue { i32, i1 } [[TMP2474]], 1 +// CHECK-NEXT: [[TMP2476:%.*]] = zext i1 [[TMP2475]] to i32 +// CHECK-NEXT: store i32 [[TMP2476]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2477:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2478:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2479:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2477]], i32 [[TMP2478]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2480:%.*]] = extractvalue { i32, i1 } [[TMP2479]], 1 +// CHECK-NEXT: [[TMP2481:%.*]] = zext i1 [[TMP2480]] to i32 +// CHECK-NEXT: store i32 [[TMP2481]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2482:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2483:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2484:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2482]], i32 [[TMP2483]] acq_rel acquire, align 4 +// CHECK-NEXT: [[TMP2485:%.*]] = extractvalue { i32, i1 } [[TMP2484]], 0 +// CHECK-NEXT: [[TMP2486:%.*]] = extractvalue { i32, i1 } [[TMP2484]], 1 +// CHECK-NEXT: br i1 [[TMP2486]], label [[UIX_ATOMIC_EXIT241:%.*]], label [[UIX_ATOMIC_CONT242:%.*]] +// CHECK: uix.atomic.cont242: +// CHECK-NEXT: store i32 [[TMP2485]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT241]] +// CHECK: uix.atomic.exit241: +// CHECK-NEXT: [[TMP2487:%.*]] = extractvalue { i32, i1 } [[TMP2484]], 1 +// CHECK-NEXT: [[TMP2488:%.*]] = zext i1 [[TMP2487]] to i32 +// CHECK-NEXT: store i32 [[TMP2488]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2489:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2490:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2491:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2489]], i32 [[TMP2490]] acq_rel 
acquire, align 4 +// CHECK-NEXT: [[TMP2492:%.*]] = extractvalue { i32, i1 } [[TMP2491]], 0 +// CHECK-NEXT: [[TMP2493:%.*]] = extractvalue { i32, i1 } [[TMP2491]], 1 +// CHECK-NEXT: br i1 [[TMP2493]], label [[UIX_ATOMIC_EXIT243:%.*]], label [[UIX_ATOMIC_CONT244:%.*]] +// CHECK: uix.atomic.cont244: +// CHECK-NEXT: store i32 [[TMP2492]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT243]] +// CHECK: uix.atomic.exit243: +// CHECK-NEXT: [[TMP2494:%.*]] = extractvalue { i32, i1 } [[TMP2491]], 1 +// CHECK-NEXT: [[TMP2495:%.*]] = zext i1 [[TMP2494]] to i32 +// CHECK-NEXT: store i32 [[TMP2495]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2496:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2497:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2496]] acquire, align 4 +// CHECK-NEXT: store i32 [[TMP2497]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2498:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2499:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2498]] acquire, align 4 +// CHECK-NEXT: store i32 [[TMP2499]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2500:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2501:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2500]] acquire, align 4 +// CHECK-NEXT: store i32 [[TMP2501]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2502:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2503:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2502]] acquire, align 4 +// CHECK-NEXT: store i32 [[TMP2503]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2504:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2505:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2506:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2504]], i32 [[TMP2505]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2507:%.*]] = extractvalue { i32, i1 } [[TMP2506]], 0 +// CHECK-NEXT: store i32 [[TMP2507]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2508:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2509:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2510:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2508]], i32 [[TMP2509]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2511:%.*]] = extractvalue { i32, i1 } [[TMP2510]], 0 +// CHECK-NEXT: store i32 [[TMP2511]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2512:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2513:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2512]] acquire, align 4 +// CHECK-NEXT: [[TMP2514:%.*]] = icmp ugt i32 [[TMP2513]], [[TMP2512]] +// CHECK-NEXT: [[TMP2515:%.*]] = select i1 [[TMP2514]], i32 [[TMP2512]], i32 [[TMP2513]] +// CHECK-NEXT: store i32 [[TMP2515]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2516:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2517:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2516]] acquire, align 4 +// CHECK-NEXT: [[TMP2518:%.*]] = icmp ult i32 [[TMP2517]], [[TMP2516]] +// CHECK-NEXT: [[TMP2519:%.*]] = select i1 [[TMP2518]], i32 [[TMP2516]], i32 [[TMP2517]] +// CHECK-NEXT: store i32 [[TMP2519]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2520:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2521:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2520]] acquire, align 4 +// CHECK-NEXT: [[TMP2522:%.*]] = icmp ult i32 [[TMP2521]], [[TMP2520]] +// CHECK-NEXT: [[TMP2523:%.*]] = select i1 [[TMP2522]], i32 [[TMP2520]], i32 [[TMP2521]] +// CHECK-NEXT: store i32 [[TMP2523]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2524:%.*]] = load 
i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2525:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2524]] acquire, align 4 +// CHECK-NEXT: [[TMP2526:%.*]] = icmp ugt i32 [[TMP2525]], [[TMP2524]] +// CHECK-NEXT: [[TMP2527:%.*]] = select i1 [[TMP2526]], i32 [[TMP2524]], i32 [[TMP2525]] +// CHECK-NEXT: store i32 [[TMP2527]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2528:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2529:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2530:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2528]], i32 [[TMP2529]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2531:%.*]] = extractvalue { i32, i1 } [[TMP2530]], 0 +// CHECK-NEXT: [[TMP2532:%.*]] = extractvalue { i32, i1 } [[TMP2530]], 1 +// CHECK-NEXT: [[TMP2533:%.*]] = select i1 [[TMP2532]], i32 [[TMP2528]], i32 [[TMP2531]] +// CHECK-NEXT: store i32 [[TMP2533]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2534:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2535:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2536:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2534]], i32 [[TMP2535]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2537:%.*]] = extractvalue { i32, i1 } [[TMP2536]], 0 +// CHECK-NEXT: [[TMP2538:%.*]] = extractvalue { i32, i1 } [[TMP2536]], 1 +// CHECK-NEXT: [[TMP2539:%.*]] = select i1 [[TMP2538]], i32 [[TMP2534]], i32 [[TMP2537]] +// CHECK-NEXT: store i32 [[TMP2539]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2540:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2541:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2542:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2540]], i32 [[TMP2541]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2543:%.*]] = extractvalue { i32, i1 } [[TMP2542]], 0 +// CHECK-NEXT: [[TMP2544:%.*]] = extractvalue { i32, i1 } [[TMP2542]], 1 +// CHECK-NEXT: br i1 [[TMP2544]], label [[UIX_ATOMIC_EXIT245:%.*]], label [[UIX_ATOMIC_CONT246:%.*]] +// CHECK: uix.atomic.cont246: +// CHECK-NEXT: store i32 [[TMP2543]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT245]] +// CHECK: uix.atomic.exit245: +// CHECK-NEXT: [[TMP2545:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2546:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2547:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2545]], i32 [[TMP2546]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2548:%.*]] = extractvalue { i32, i1 } [[TMP2547]], 0 +// CHECK-NEXT: [[TMP2549:%.*]] = extractvalue { i32, i1 } [[TMP2547]], 1 +// CHECK-NEXT: br i1 [[TMP2549]], label [[UIX_ATOMIC_EXIT247:%.*]], label [[UIX_ATOMIC_CONT248:%.*]] +// CHECK: uix.atomic.cont248: +// CHECK-NEXT: store i32 [[TMP2548]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT247]] +// CHECK: uix.atomic.exit247: +// CHECK-NEXT: [[TMP2550:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2551:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2552:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2550]], i32 [[TMP2551]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2553:%.*]] = extractvalue { i32, i1 } [[TMP2552]], 1 +// CHECK-NEXT: [[TMP2554:%.*]] = zext i1 [[TMP2553]] to i32 +// CHECK-NEXT: store i32 [[TMP2554]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2555:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2556:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2557:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2555]], i32 [[TMP2556]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2558:%.*]] = extractvalue { i32, i1 } [[TMP2557]], 1 +// CHECK-NEXT: 
[[TMP2559:%.*]] = zext i1 [[TMP2558]] to i32 +// CHECK-NEXT: store i32 [[TMP2559]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2560:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2561:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2562:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2560]], i32 [[TMP2561]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2563:%.*]] = extractvalue { i32, i1 } [[TMP2562]], 0 +// CHECK-NEXT: [[TMP2564:%.*]] = extractvalue { i32, i1 } [[TMP2562]], 1 +// CHECK-NEXT: br i1 [[TMP2564]], label [[UIX_ATOMIC_EXIT249:%.*]], label [[UIX_ATOMIC_CONT250:%.*]] +// CHECK: uix.atomic.cont250: +// CHECK-NEXT: store i32 [[TMP2563]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT249]] +// CHECK: uix.atomic.exit249: +// CHECK-NEXT: [[TMP2565:%.*]] = extractvalue { i32, i1 } [[TMP2562]], 1 +// CHECK-NEXT: [[TMP2566:%.*]] = zext i1 [[TMP2565]] to i32 +// CHECK-NEXT: store i32 [[TMP2566]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2567:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2568:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2569:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2567]], i32 [[TMP2568]] acquire acquire, align 4 +// CHECK-NEXT: [[TMP2570:%.*]] = extractvalue { i32, i1 } [[TMP2569]], 0 +// CHECK-NEXT: [[TMP2571:%.*]] = extractvalue { i32, i1 } [[TMP2569]], 1 +// CHECK-NEXT: br i1 [[TMP2571]], label [[UIX_ATOMIC_EXIT251:%.*]], label [[UIX_ATOMIC_CONT252:%.*]] +// CHECK: uix.atomic.cont252: +// CHECK-NEXT: store i32 [[TMP2570]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT251]] +// CHECK: uix.atomic.exit251: +// CHECK-NEXT: [[TMP2572:%.*]] = extractvalue { i32, i1 } [[TMP2569]], 1 +// CHECK-NEXT: [[TMP2573:%.*]] = zext i1 [[TMP2572]] to i32 +// CHECK-NEXT: store i32 [[TMP2573]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2574:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2575:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2574]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2575]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2576:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2577:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2576]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2577]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2578:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2579:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2578]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2579]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2580:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2581:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2580]] monotonic, align 4 +// CHECK-NEXT: store i32 [[TMP2581]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2582:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2583:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2584:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2582]], i32 [[TMP2583]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2585:%.*]] = extractvalue { i32, i1 } [[TMP2584]], 0 +// CHECK-NEXT: store i32 [[TMP2585]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2586:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2587:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2588:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2586]], i32 [[TMP2587]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2589:%.*]] = extractvalue { i32, i1 } [[TMP2588]], 0 +// CHECK-NEXT: store i32 [[TMP2589]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2590:%.*]] = load i32, i32* 
[[UIE]], align 4 +// CHECK-NEXT: [[TMP2591:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2590]] monotonic, align 4 +// CHECK-NEXT: [[TMP2592:%.*]] = icmp ugt i32 [[TMP2591]], [[TMP2590]] +// CHECK-NEXT: [[TMP2593:%.*]] = select i1 [[TMP2592]], i32 [[TMP2590]], i32 [[TMP2591]] +// CHECK-NEXT: store i32 [[TMP2593]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2594:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2595:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2594]] monotonic, align 4 +// CHECK-NEXT: [[TMP2596:%.*]] = icmp ult i32 [[TMP2595]], [[TMP2594]] +// CHECK-NEXT: [[TMP2597:%.*]] = select i1 [[TMP2596]], i32 [[TMP2594]], i32 [[TMP2595]] +// CHECK-NEXT: store i32 [[TMP2597]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2598:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2599:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2598]] monotonic, align 4 +// CHECK-NEXT: [[TMP2600:%.*]] = icmp ult i32 [[TMP2599]], [[TMP2598]] +// CHECK-NEXT: [[TMP2601:%.*]] = select i1 [[TMP2600]], i32 [[TMP2598]], i32 [[TMP2599]] +// CHECK-NEXT: store i32 [[TMP2601]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2602:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2603:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2602]] monotonic, align 4 +// CHECK-NEXT: [[TMP2604:%.*]] = icmp ugt i32 [[TMP2603]], [[TMP2602]] +// CHECK-NEXT: [[TMP2605:%.*]] = select i1 [[TMP2604]], i32 [[TMP2602]], i32 [[TMP2603]] +// CHECK-NEXT: store i32 [[TMP2605]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2606:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2607:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2608:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2606]], i32 [[TMP2607]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2609:%.*]] = extractvalue { i32, i1 } [[TMP2608]], 0 +// CHECK-NEXT: [[TMP2610:%.*]] = extractvalue { i32, i1 } [[TMP2608]], 1 +// CHECK-NEXT: [[TMP2611:%.*]] = select i1 [[TMP2610]], i32 [[TMP2606]], i32 [[TMP2609]] +// CHECK-NEXT: store i32 [[TMP2611]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2612:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2613:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2614:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2612]], i32 [[TMP2613]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2615:%.*]] = extractvalue { i32, i1 } [[TMP2614]], 0 +// CHECK-NEXT: [[TMP2616:%.*]] = extractvalue { i32, i1 } [[TMP2614]], 1 +// CHECK-NEXT: [[TMP2617:%.*]] = select i1 [[TMP2616]], i32 [[TMP2612]], i32 [[TMP2615]] +// CHECK-NEXT: store i32 [[TMP2617]], i32* [[UIV]], align 4 +// CHECK-NEXT: [[TMP2618:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2619:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2620:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2618]], i32 [[TMP2619]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2621:%.*]] = extractvalue { i32, i1 } [[TMP2620]], 0 +// CHECK-NEXT: [[TMP2622:%.*]] = extractvalue { i32, i1 } [[TMP2620]], 1 +// CHECK-NEXT: br i1 [[TMP2622]], label [[UIX_ATOMIC_EXIT253:%.*]], label [[UIX_ATOMIC_CONT254:%.*]] +// CHECK: uix.atomic.cont254: +// CHECK-NEXT: store i32 [[TMP2621]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT253]] +// CHECK: uix.atomic.exit253: +// CHECK-NEXT: [[TMP2623:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2624:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2625:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2623]], i32 [[TMP2624]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2626:%.*]] = 
extractvalue { i32, i1 } [[TMP2625]], 0 +// CHECK-NEXT: [[TMP2627:%.*]] = extractvalue { i32, i1 } [[TMP2625]], 1 +// CHECK-NEXT: br i1 [[TMP2627]], label [[UIX_ATOMIC_EXIT255:%.*]], label [[UIX_ATOMIC_CONT256:%.*]] +// CHECK: uix.atomic.cont256: +// CHECK-NEXT: store i32 [[TMP2626]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT255]] +// CHECK: uix.atomic.exit255: +// CHECK-NEXT: [[TMP2628:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2629:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2630:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2628]], i32 [[TMP2629]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2631:%.*]] = extractvalue { i32, i1 } [[TMP2630]], 1 +// CHECK-NEXT: [[TMP2632:%.*]] = zext i1 [[TMP2631]] to i32 +// CHECK-NEXT: store i32 [[TMP2632]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2633:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2634:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2635:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2633]], i32 [[TMP2634]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2636:%.*]] = extractvalue { i32, i1 } [[TMP2635]], 1 +// CHECK-NEXT: [[TMP2637:%.*]] = zext i1 [[TMP2636]] to i32 +// CHECK-NEXT: store i32 [[TMP2637]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2638:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2639:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2640:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2638]], i32 [[TMP2639]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2641:%.*]] = extractvalue { i32, i1 } [[TMP2640]], 0 +// CHECK-NEXT: [[TMP2642:%.*]] = extractvalue { i32, i1 } [[TMP2640]], 1 +// CHECK-NEXT: br i1 [[TMP2642]], label [[UIX_ATOMIC_EXIT257:%.*]], label [[UIX_ATOMIC_CONT258:%.*]] +// CHECK: uix.atomic.cont258: +// CHECK-NEXT: store i32 [[TMP2641]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT257]] +// CHECK: uix.atomic.exit257: +// CHECK-NEXT: [[TMP2643:%.*]] = extractvalue { i32, i1 } [[TMP2640]], 1 +// CHECK-NEXT: [[TMP2644:%.*]] = zext i1 [[TMP2643]] to i32 +// CHECK-NEXT: store i32 [[TMP2644]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2645:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2646:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2647:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2645]], i32 [[TMP2646]] monotonic monotonic, align 4 +// CHECK-NEXT: [[TMP2648:%.*]] = extractvalue { i32, i1 } [[TMP2647]], 0 +// CHECK-NEXT: [[TMP2649:%.*]] = extractvalue { i32, i1 } [[TMP2647]], 1 +// CHECK-NEXT: br i1 [[TMP2649]], label [[UIX_ATOMIC_EXIT259:%.*]], label [[UIX_ATOMIC_CONT260:%.*]] +// CHECK: uix.atomic.cont260: +// CHECK-NEXT: store i32 [[TMP2648]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT259]] +// CHECK: uix.atomic.exit259: +// CHECK-NEXT: [[TMP2650:%.*]] = extractvalue { i32, i1 } [[TMP2647]], 1 +// CHECK-NEXT: [[TMP2651:%.*]] = zext i1 [[TMP2650]] to i32 +// CHECK-NEXT: store i32 [[TMP2651]], i32* [[UIR]], align 4 +// CHECK-NEXT: [[TMP2652:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2653:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2652]] release, align 4 +// CHECK-NEXT: store i32 [[TMP2653]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2654:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2655:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2654]] release, align 4 +// CHECK-NEXT: store i32 [[TMP2655]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2656:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2657:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2656]] release, align 4 +// CHECK-NEXT: store i32 [[TMP2657]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2658:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2659:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2658]] release, align 4 +// CHECK-NEXT: store i32 [[TMP2659]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2660:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2661:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2662:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2660]], i32 [[TMP2661]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2663:%.*]] = extractvalue { i32, i1 } [[TMP2662]], 0 +// CHECK-NEXT: store i32 [[TMP2663]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2664:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2665:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2666:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2664]], i32 [[TMP2665]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2667:%.*]] = extractvalue { i32, i1 } [[TMP2666]], 0 +// CHECK-NEXT: store i32 [[TMP2667]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2668:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2669:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2668]] release, align 4 +// CHECK-NEXT: [[TMP2670:%.*]] = icmp ugt i32 [[TMP2669]], [[TMP2668]] +// CHECK-NEXT: [[TMP2671:%.*]] = select i1 [[TMP2670]], i32 [[TMP2668]], i32 [[TMP2669]] +// CHECK-NEXT: store i32 [[TMP2671]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2672:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2673:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2672]] release, align 4 +// CHECK-NEXT: [[TMP2674:%.*]] = icmp ult i32 [[TMP2673]], [[TMP2672]] +// CHECK-NEXT: [[TMP2675:%.*]] = select i1 [[TMP2674]], i32 [[TMP2672]], i32 [[TMP2673]] +// CHECK-NEXT: store i32 [[TMP2675]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2676:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2677:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2676]] release, align 4 +// CHECK-NEXT: [[TMP2678:%.*]] = icmp ult i32 [[TMP2677]], [[TMP2676]] +// CHECK-NEXT: [[TMP2679:%.*]] = select i1 [[TMP2678]], i32 [[TMP2676]], i32 [[TMP2677]] +// CHECK-NEXT: store i32 [[TMP2679]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2680:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2681:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2680]] release, align 4 +// CHECK-NEXT: [[TMP2682:%.*]] = icmp ugt i32 [[TMP2681]], [[TMP2680]] +// CHECK-NEXT: [[TMP2683:%.*]] = select i1 [[TMP2682]], i32 [[TMP2680]], i32 [[TMP2681]] +// CHECK-NEXT: store i32 [[TMP2683]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2684:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2685:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2686:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2684]], 
i32 [[TMP2685]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2687:%.*]] = extractvalue { i32, i1 } [[TMP2686]], 0 +// CHECK-NEXT: [[TMP2688:%.*]] = extractvalue { i32, i1 } [[TMP2686]], 1 +// CHECK-NEXT: [[TMP2689:%.*]] = select i1 [[TMP2688]], i32 [[TMP2684]], i32 [[TMP2687]] +// CHECK-NEXT: store i32 [[TMP2689]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2690:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2691:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2692:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2690]], i32 [[TMP2691]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2693:%.*]] = extractvalue { i32, i1 } [[TMP2692]], 0 +// CHECK-NEXT: [[TMP2694:%.*]] = extractvalue { i32, i1 } [[TMP2692]], 1 +// CHECK-NEXT: [[TMP2695:%.*]] = select i1 [[TMP2694]], i32 [[TMP2690]], i32 [[TMP2693]] +// CHECK-NEXT: store i32 [[TMP2695]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2696:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2697:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2698:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2696]], i32 [[TMP2697]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2699:%.*]] = extractvalue { i32, i1 } [[TMP2698]], 0 +// CHECK-NEXT: [[TMP2700:%.*]] = extractvalue { i32, i1 } [[TMP2698]], 1 +// CHECK-NEXT: br i1 [[TMP2700]], label [[UIX_ATOMIC_EXIT261:%.*]], label [[UIX_ATOMIC_CONT262:%.*]] +// CHECK: uix.atomic.cont262: +// CHECK-NEXT: store i32 [[TMP2699]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT261]] +// CHECK: uix.atomic.exit261: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2701:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2702:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2703:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2701]], i32 [[TMP2702]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2704:%.*]] = extractvalue { i32, i1 } [[TMP2703]], 0 +// CHECK-NEXT: [[TMP2705:%.*]] = extractvalue { i32, i1 } [[TMP2703]], 1 +// CHECK-NEXT: br i1 [[TMP2705]], label [[UIX_ATOMIC_EXIT263:%.*]], label [[UIX_ATOMIC_CONT264:%.*]] +// CHECK: uix.atomic.cont264: +// CHECK-NEXT: store i32 [[TMP2704]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT263]] +// CHECK: uix.atomic.exit263: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2706:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2707:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2708:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2706]], i32 [[TMP2707]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2709:%.*]] = extractvalue { i32, i1 } [[TMP2708]], 1 +// CHECK-NEXT: [[TMP2710:%.*]] = zext i1 [[TMP2709]] to i32 +// CHECK-NEXT: store i32 [[TMP2710]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2711:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2712:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2713:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2711]], i32 [[TMP2712]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2714:%.*]] = extractvalue { i32, i1 } [[TMP2713]], 1 +// CHECK-NEXT: [[TMP2715:%.*]] = zext i1 [[TMP2714]] to i32 +// CHECK-NEXT: store i32 [[TMP2715]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: 
[[TMP2716:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2717:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2718:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2716]], i32 [[TMP2717]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2719:%.*]] = extractvalue { i32, i1 } [[TMP2718]], 0 +// CHECK-NEXT: [[TMP2720:%.*]] = extractvalue { i32, i1 } [[TMP2718]], 1 +// CHECK-NEXT: br i1 [[TMP2720]], label [[UIX_ATOMIC_EXIT265:%.*]], label [[UIX_ATOMIC_CONT266:%.*]] +// CHECK: uix.atomic.cont266: +// CHECK-NEXT: store i32 [[TMP2719]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT265]] +// CHECK: uix.atomic.exit265: +// CHECK-NEXT: [[TMP2721:%.*]] = extractvalue { i32, i1 } [[TMP2718]], 1 +// CHECK-NEXT: [[TMP2722:%.*]] = zext i1 [[TMP2721]] to i32 +// CHECK-NEXT: store i32 [[TMP2722]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2723:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2724:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2725:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2723]], i32 [[TMP2724]] release monotonic, align 4 +// CHECK-NEXT: [[TMP2726:%.*]] = extractvalue { i32, i1 } [[TMP2725]], 0 +// CHECK-NEXT: [[TMP2727:%.*]] = extractvalue { i32, i1 } [[TMP2725]], 1 +// CHECK-NEXT: br i1 [[TMP2727]], label [[UIX_ATOMIC_EXIT267:%.*]], label [[UIX_ATOMIC_CONT268:%.*]] +// CHECK: uix.atomic.cont268: +// CHECK-NEXT: store i32 [[TMP2726]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT267]] +// CHECK: uix.atomic.exit267: +// CHECK-NEXT: [[TMP2728:%.*]] = extractvalue { i32, i1 } [[TMP2725]], 1 +// CHECK-NEXT: [[TMP2729:%.*]] = zext i1 [[TMP2728]] to i32 +// CHECK-NEXT: store i32 [[TMP2729]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2730:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2731:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2730]] seq_cst, align 4 +// CHECK-NEXT: store i32 [[TMP2731]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2732:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2733:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2732]] seq_cst, align 4 +// CHECK-NEXT: store i32 [[TMP2733]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2734:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2735:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2734]] seq_cst, align 4 +// CHECK-NEXT: store i32 [[TMP2735]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2736:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2737:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2736]] seq_cst, align 4 +// CHECK-NEXT: store i32 [[TMP2737]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2738:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2739:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2740:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2738]], i32 [[TMP2739]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2741:%.*]] = extractvalue { i32, i1 } [[TMP2740]], 0 +// CHECK-NEXT: store i32 [[TMP2741]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2742:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: 
[[TMP2743:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2744:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2742]], i32 [[TMP2743]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2745:%.*]] = extractvalue { i32, i1 } [[TMP2744]], 0 +// CHECK-NEXT: store i32 [[TMP2745]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2746:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2747:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2746]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2748:%.*]] = icmp ugt i32 [[TMP2747]], [[TMP2746]] +// CHECK-NEXT: [[TMP2749:%.*]] = select i1 [[TMP2748]], i32 [[TMP2746]], i32 [[TMP2747]] +// CHECK-NEXT: store i32 [[TMP2749]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2750:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2751:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2750]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2752:%.*]] = icmp ult i32 [[TMP2751]], [[TMP2750]] +// CHECK-NEXT: [[TMP2753:%.*]] = select i1 [[TMP2752]], i32 [[TMP2750]], i32 [[TMP2751]] +// CHECK-NEXT: store i32 [[TMP2753]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2754:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2755:%.*]] = atomicrmw umin i32* [[UIX]], i32 [[TMP2754]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2756:%.*]] = icmp ult i32 [[TMP2755]], [[TMP2754]] +// CHECK-NEXT: [[TMP2757:%.*]] = select i1 [[TMP2756]], i32 [[TMP2754]], i32 [[TMP2755]] +// CHECK-NEXT: store i32 [[TMP2757]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2758:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2759:%.*]] = atomicrmw umax i32* [[UIX]], i32 [[TMP2758]] seq_cst, align 4 +// CHECK-NEXT: [[TMP2760:%.*]] = icmp ugt i32 [[TMP2759]], [[TMP2758]] +// CHECK-NEXT: [[TMP2761:%.*]] = select i1 [[TMP2760]], i32 [[TMP2758]], i32 [[TMP2759]] +// CHECK-NEXT: store i32 [[TMP2761]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2762:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2763:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2764:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2762]], i32 [[TMP2763]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2765:%.*]] = extractvalue { i32, i1 } [[TMP2764]], 0 +// CHECK-NEXT: [[TMP2766:%.*]] = extractvalue { i32, i1 } [[TMP2764]], 1 +// CHECK-NEXT: [[TMP2767:%.*]] = select i1 [[TMP2766]], i32 [[TMP2762]], i32 [[TMP2765]] +// CHECK-NEXT: store i32 [[TMP2767]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2768:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2769:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2770:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2768]], i32 [[TMP2769]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2771:%.*]] = extractvalue { i32, i1 } [[TMP2770]], 0 +// CHECK-NEXT: [[TMP2772:%.*]] = extractvalue { i32, i1 } [[TMP2770]], 1 +// CHECK-NEXT: [[TMP2773:%.*]] = select i1 [[TMP2772]], i32 [[TMP2768]], i32 [[TMP2771]] +// CHECK-NEXT: store i32 [[TMP2773]], i32* [[UIV]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2774:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2775:%.*]] = load i32, i32* [[UID]], align 4 +// 
CHECK-NEXT: [[TMP2776:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2774]], i32 [[TMP2775]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2777:%.*]] = extractvalue { i32, i1 } [[TMP2776]], 0 +// CHECK-NEXT: [[TMP2778:%.*]] = extractvalue { i32, i1 } [[TMP2776]], 1 +// CHECK-NEXT: br i1 [[TMP2778]], label [[UIX_ATOMIC_EXIT269:%.*]], label [[UIX_ATOMIC_CONT270:%.*]] +// CHECK: uix.atomic.cont270: +// CHECK-NEXT: store i32 [[TMP2777]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT269]] +// CHECK: uix.atomic.exit269: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2779:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2780:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2781:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2779]], i32 [[TMP2780]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2782:%.*]] = extractvalue { i32, i1 } [[TMP2781]], 0 +// CHECK-NEXT: [[TMP2783:%.*]] = extractvalue { i32, i1 } [[TMP2781]], 1 +// CHECK-NEXT: br i1 [[TMP2783]], label [[UIX_ATOMIC_EXIT271:%.*]], label [[UIX_ATOMIC_CONT272:%.*]] +// CHECK: uix.atomic.cont272: +// CHECK-NEXT: store i32 [[TMP2782]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT271]] +// CHECK: uix.atomic.exit271: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2784:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2785:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2786:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2784]], i32 [[TMP2785]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2787:%.*]] = extractvalue { i32, i1 } [[TMP2786]], 1 +// CHECK-NEXT: [[TMP2788:%.*]] = zext i1 [[TMP2787]] to i32 +// CHECK-NEXT: store i32 [[TMP2788]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2789:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2790:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2791:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2789]], i32 [[TMP2790]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2792:%.*]] = extractvalue { i32, i1 } [[TMP2791]], 1 +// CHECK-NEXT: [[TMP2793:%.*]] = zext i1 [[TMP2792]] to i32 +// CHECK-NEXT: store i32 [[TMP2793]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2794:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2795:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2796:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2794]], i32 [[TMP2795]] seq_cst seq_cst, align 4 +// CHECK-NEXT: [[TMP2797:%.*]] = extractvalue { i32, i1 } [[TMP2796]], 0 +// CHECK-NEXT: [[TMP2798:%.*]] = extractvalue { i32, i1 } [[TMP2796]], 1 +// CHECK-NEXT: br i1 [[TMP2798]], label [[UIX_ATOMIC_EXIT273:%.*]], label [[UIX_ATOMIC_CONT274:%.*]] +// CHECK: uix.atomic.cont274: +// CHECK-NEXT: store i32 [[TMP2797]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT273]] +// CHECK: uix.atomic.exit273: +// CHECK-NEXT: [[TMP2799:%.*]] = extractvalue { i32, i1 } [[TMP2796]], 1 +// CHECK-NEXT: [[TMP2800:%.*]] = zext i1 [[TMP2799]] to i32 +// CHECK-NEXT: store i32 [[TMP2800]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2801:%.*]] = load i32, i32* [[UIE]], align 4 +// CHECK-NEXT: [[TMP2802:%.*]] = load i32, i32* [[UID]], align 4 +// CHECK-NEXT: [[TMP2803:%.*]] = cmpxchg i32* [[UIX]], i32 [[TMP2801]], i32 [[TMP2802]] seq_cst seq_cst, align 4 +// CHECK-NEXT: 
[[TMP2804:%.*]] = extractvalue { i32, i1 } [[TMP2803]], 0 +// CHECK-NEXT: [[TMP2805:%.*]] = extractvalue { i32, i1 } [[TMP2803]], 1 +// CHECK-NEXT: br i1 [[TMP2805]], label [[UIX_ATOMIC_EXIT275:%.*]], label [[UIX_ATOMIC_CONT276:%.*]] +// CHECK: uix.atomic.cont276: +// CHECK-NEXT: store i32 [[TMP2804]], i32* [[UIV]], align 4 +// CHECK-NEXT: br label [[UIX_ATOMIC_EXIT275]] +// CHECK: uix.atomic.exit275: +// CHECK-NEXT: [[TMP2806:%.*]] = extractvalue { i32, i1 } [[TMP2803]], 1 +// CHECK-NEXT: [[TMP2807:%.*]] = zext i1 [[TMP2806]] to i32 +// CHECK-NEXT: store i32 [[TMP2807]], i32* [[UIR]], align 4 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2808:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2809:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2808]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2809]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2810:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2811:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2810]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2811]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2812:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2813:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2812]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2813]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2814:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2815:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2814]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2815]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2816:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2817:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2818:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2816]], i64 [[TMP2817]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2819:%.*]] = extractvalue { i64, i1 } [[TMP2818]], 0 +// CHECK-NEXT: store volatile i64 [[TMP2819]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2820:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2821:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2822:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2820]], i64 [[TMP2821]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2823:%.*]] = extractvalue { i64, i1 } [[TMP2822]], 0 +// CHECK-NEXT: store volatile i64 [[TMP2823]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2824:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2825:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2824]] monotonic, align 8 +// CHECK-NEXT: [[TMP2826:%.*]] = icmp ugt i64 [[TMP2825]], [[TMP2824]] +// CHECK-NEXT: [[TMP2827:%.*]] = select i1 [[TMP2826]], i64 [[TMP2824]], i64 [[TMP2825]] +// CHECK-NEXT: store volatile i64 [[TMP2827]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2828:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2829:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2828]] monotonic, align 8 +// CHECK-NEXT: [[TMP2830:%.*]] = icmp ult i64 [[TMP2829]], [[TMP2828]] +// CHECK-NEXT: [[TMP2831:%.*]] = select i1 [[TMP2830]], i64 [[TMP2828]], i64 [[TMP2829]] +// CHECK-NEXT: store volatile i64 [[TMP2831]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2832:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2833:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2832]] monotonic, align 8 +// CHECK-NEXT: [[TMP2834:%.*]] = icmp ult i64 [[TMP2833]], [[TMP2832]] +// CHECK-NEXT: [[TMP2835:%.*]] = select i1 [[TMP2834]], i64 [[TMP2832]], i64 [[TMP2833]] +// CHECK-NEXT: store volatile i64 [[TMP2835]], i64* [[LV]], align 8 
+// CHECK-NEXT: [[TMP2836:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2837:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2836]] monotonic, align 8 +// CHECK-NEXT: [[TMP2838:%.*]] = icmp ugt i64 [[TMP2837]], [[TMP2836]] +// CHECK-NEXT: [[TMP2839:%.*]] = select i1 [[TMP2838]], i64 [[TMP2836]], i64 [[TMP2837]] +// CHECK-NEXT: store volatile i64 [[TMP2839]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2840:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2841:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2842:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2840]], i64 [[TMP2841]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2843:%.*]] = extractvalue { i64, i1 } [[TMP2842]], 0 +// CHECK-NEXT: [[TMP2844:%.*]] = extractvalue { i64, i1 } [[TMP2842]], 1 +// CHECK-NEXT: [[TMP2845:%.*]] = select i1 [[TMP2844]], i64 [[TMP2840]], i64 [[TMP2843]] +// CHECK-NEXT: store volatile i64 [[TMP2845]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2846:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2847:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2848:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2846]], i64 [[TMP2847]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2849:%.*]] = extractvalue { i64, i1 } [[TMP2848]], 0 +// CHECK-NEXT: [[TMP2850:%.*]] = extractvalue { i64, i1 } [[TMP2848]], 1 +// CHECK-NEXT: [[TMP2851:%.*]] = select i1 [[TMP2850]], i64 [[TMP2846]], i64 [[TMP2849]] +// CHECK-NEXT: store volatile i64 [[TMP2851]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2852:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2853:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2854:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2852]], i64 [[TMP2853]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2855:%.*]] = extractvalue { i64, i1 } [[TMP2854]], 0 +// CHECK-NEXT: [[TMP2856:%.*]] = extractvalue { i64, i1 } [[TMP2854]], 1 +// CHECK-NEXT: br i1 [[TMP2856]], label [[LX_ATOMIC_EXIT:%.*]], label [[LX_ATOMIC_CONT:%.*]] +// CHECK: lx.atomic.cont: +// CHECK-NEXT: store i64 [[TMP2855]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT]] +// CHECK: lx.atomic.exit: +// CHECK-NEXT: [[TMP2857:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2858:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2859:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2857]], i64 [[TMP2858]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2860:%.*]] = extractvalue { i64, i1 } [[TMP2859]], 0 +// CHECK-NEXT: [[TMP2861:%.*]] = extractvalue { i64, i1 } [[TMP2859]], 1 +// CHECK-NEXT: br i1 [[TMP2861]], label [[LX_ATOMIC_EXIT277:%.*]], label [[LX_ATOMIC_CONT278:%.*]] +// CHECK: lx.atomic.cont278: +// CHECK-NEXT: store i64 [[TMP2860]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT277]] +// CHECK: lx.atomic.exit277: +// CHECK-NEXT: [[TMP2862:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2863:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2864:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2862]], i64 [[TMP2863]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2865:%.*]] = extractvalue { i64, i1 } [[TMP2864]], 1 +// CHECK-NEXT: [[TMP2866:%.*]] = zext i1 [[TMP2865]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2866]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP2867:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2868:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2869:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2867]], i64 [[TMP2868]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2870:%.*]] = extractvalue 
{ i64, i1 } [[TMP2869]], 1 +// CHECK-NEXT: [[TMP2871:%.*]] = zext i1 [[TMP2870]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2871]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP2872:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2873:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2874:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2872]], i64 [[TMP2873]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2875:%.*]] = extractvalue { i64, i1 } [[TMP2874]], 0 +// CHECK-NEXT: [[TMP2876:%.*]] = extractvalue { i64, i1 } [[TMP2874]], 1 +// CHECK-NEXT: br i1 [[TMP2876]], label [[LX_ATOMIC_EXIT279:%.*]], label [[LX_ATOMIC_CONT280:%.*]] +// CHECK: lx.atomic.cont280: +// CHECK-NEXT: store i64 [[TMP2875]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT279]] +// CHECK: lx.atomic.exit279: +// CHECK-NEXT: [[TMP2877:%.*]] = extractvalue { i64, i1 } [[TMP2874]], 1 +// CHECK-NEXT: [[TMP2878:%.*]] = zext i1 [[TMP2877]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2878]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP2879:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2880:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2881:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2879]], i64 [[TMP2880]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP2882:%.*]] = extractvalue { i64, i1 } [[TMP2881]], 0 +// CHECK-NEXT: [[TMP2883:%.*]] = extractvalue { i64, i1 } [[TMP2881]], 1 +// CHECK-NEXT: br i1 [[TMP2883]], label [[LX_ATOMIC_EXIT281:%.*]], label [[LX_ATOMIC_CONT282:%.*]] +// CHECK: lx.atomic.cont282: +// CHECK-NEXT: store i64 [[TMP2882]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT281]] +// CHECK: lx.atomic.exit281: +// CHECK-NEXT: [[TMP2884:%.*]] = extractvalue { i64, i1 } [[TMP2881]], 1 +// CHECK-NEXT: [[TMP2885:%.*]] = zext i1 [[TMP2884]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2885]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP2886:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2887:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2886]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2887]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2888:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2889:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2888]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2889]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2890:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2891:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2890]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2891]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2892:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2893:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2892]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2893]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2894:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2895:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2896:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2894]], i64 [[TMP2895]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2897:%.*]] = extractvalue { i64, i1 } [[TMP2896]], 0 +// CHECK-NEXT: store volatile i64 [[TMP2897]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: 
[[TMP2898:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2899:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2900:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2898]], i64 [[TMP2899]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2901:%.*]] = extractvalue { i64, i1 } [[TMP2900]], 0 +// CHECK-NEXT: store volatile i64 [[TMP2901]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2902:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2903:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2902]] acq_rel, align 8 +// CHECK-NEXT: [[TMP2904:%.*]] = icmp ugt i64 [[TMP2903]], [[TMP2902]] +// CHECK-NEXT: [[TMP2905:%.*]] = select i1 [[TMP2904]], i64 [[TMP2902]], i64 [[TMP2903]] +// CHECK-NEXT: store volatile i64 [[TMP2905]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2906:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2907:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2906]] acq_rel, align 8 +// CHECK-NEXT: [[TMP2908:%.*]] = icmp ult i64 [[TMP2907]], [[TMP2906]] +// CHECK-NEXT: [[TMP2909:%.*]] = select i1 [[TMP2908]], i64 [[TMP2906]], i64 [[TMP2907]] +// CHECK-NEXT: store volatile i64 [[TMP2909]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2910:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2911:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2910]] acq_rel, align 8 +// CHECK-NEXT: [[TMP2912:%.*]] = icmp ult i64 [[TMP2911]], [[TMP2910]] +// CHECK-NEXT: [[TMP2913:%.*]] = select i1 [[TMP2912]], i64 [[TMP2910]], i64 [[TMP2911]] +// CHECK-NEXT: store volatile i64 [[TMP2913]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2914:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2915:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2914]] acq_rel, align 8 +// CHECK-NEXT: [[TMP2916:%.*]] = icmp ugt i64 [[TMP2915]], [[TMP2914]] +// CHECK-NEXT: [[TMP2917:%.*]] = select i1 [[TMP2916]], i64 [[TMP2914]], i64 [[TMP2915]] +// CHECK-NEXT: store volatile i64 [[TMP2917]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2918:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2919:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2920:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2918]], i64 [[TMP2919]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2921:%.*]] = extractvalue { i64, i1 } [[TMP2920]], 0 +// CHECK-NEXT: [[TMP2922:%.*]] = extractvalue { i64, i1 } [[TMP2920]], 1 +// CHECK-NEXT: [[TMP2923:%.*]] = select i1 [[TMP2922]], i64 [[TMP2918]], i64 [[TMP2921]] +// CHECK-NEXT: store volatile i64 [[TMP2923]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2924:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2925:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2926:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2924]], i64 [[TMP2925]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2927:%.*]] = extractvalue { i64, i1 } [[TMP2926]], 0 +// CHECK-NEXT: [[TMP2928:%.*]] = extractvalue { i64, i1 } [[TMP2926]], 1 +// CHECK-NEXT: [[TMP2929:%.*]] = select i1 [[TMP2928]], i64 [[TMP2924]], i64 [[TMP2927]] +// CHECK-NEXT: store volatile i64 [[TMP2929]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2930:%.*]] = 
load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2931:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2932:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2930]], i64 [[TMP2931]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2933:%.*]] = extractvalue { i64, i1 } [[TMP2932]], 0 +// CHECK-NEXT: [[TMP2934:%.*]] = extractvalue { i64, i1 } [[TMP2932]], 1 +// CHECK-NEXT: br i1 [[TMP2934]], label [[LX_ATOMIC_EXIT283:%.*]], label [[LX_ATOMIC_CONT284:%.*]] +// CHECK: lx.atomic.cont284: +// CHECK-NEXT: store i64 [[TMP2933]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT283]] +// CHECK: lx.atomic.exit283: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2935:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2936:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2937:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2935]], i64 [[TMP2936]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2938:%.*]] = extractvalue { i64, i1 } [[TMP2937]], 0 +// CHECK-NEXT: [[TMP2939:%.*]] = extractvalue { i64, i1 } [[TMP2937]], 1 +// CHECK-NEXT: br i1 [[TMP2939]], label [[LX_ATOMIC_EXIT285:%.*]], label [[LX_ATOMIC_CONT286:%.*]] +// CHECK: lx.atomic.cont286: +// CHECK-NEXT: store i64 [[TMP2938]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT285]] +// CHECK: lx.atomic.exit285: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2940:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2941:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2942:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2940]], i64 [[TMP2941]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2943:%.*]] = extractvalue { i64, i1 } [[TMP2942]], 1 +// CHECK-NEXT: [[TMP2944:%.*]] = zext i1 [[TMP2943]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2944]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2945:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2946:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2947:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2945]], i64 [[TMP2946]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2948:%.*]] = extractvalue { i64, i1 } [[TMP2947]], 1 +// CHECK-NEXT: [[TMP2949:%.*]] = zext i1 [[TMP2948]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2949]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2950:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2951:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2952:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2950]], i64 [[TMP2951]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2953:%.*]] = extractvalue { i64, i1 } [[TMP2952]], 0 +// CHECK-NEXT: [[TMP2954:%.*]] = extractvalue { i64, i1 } [[TMP2952]], 1 +// CHECK-NEXT: br i1 [[TMP2954]], label [[LX_ATOMIC_EXIT287:%.*]], label [[LX_ATOMIC_CONT288:%.*]] +// CHECK: lx.atomic.cont288: +// CHECK-NEXT: store i64 [[TMP2953]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT287]] +// CHECK: lx.atomic.exit287: +// CHECK-NEXT: [[TMP2955:%.*]] = extractvalue { i64, i1 } [[TMP2952]], 1 +// CHECK-NEXT: [[TMP2956:%.*]] = zext i1 [[TMP2955]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2956]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2957:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2958:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2959:%.*]] = cmpxchg 
i64* [[LX]], i64 [[TMP2957]], i64 [[TMP2958]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP2960:%.*]] = extractvalue { i64, i1 } [[TMP2959]], 0 +// CHECK-NEXT: [[TMP2961:%.*]] = extractvalue { i64, i1 } [[TMP2959]], 1 +// CHECK-NEXT: br i1 [[TMP2961]], label [[LX_ATOMIC_EXIT289:%.*]], label [[LX_ATOMIC_CONT290:%.*]] +// CHECK: lx.atomic.cont290: +// CHECK-NEXT: store i64 [[TMP2960]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT289]] +// CHECK: lx.atomic.exit289: +// CHECK-NEXT: [[TMP2962:%.*]] = extractvalue { i64, i1 } [[TMP2959]], 1 +// CHECK-NEXT: [[TMP2963:%.*]] = zext i1 [[TMP2962]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP2963]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP2964:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2965:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2964]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2965]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2966:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2967:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2966]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2967]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2968:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2969:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2968]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2969]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2970:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2971:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2970]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP2971]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2972:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2973:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2974:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2972]], i64 [[TMP2973]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP2975:%.*]] = extractvalue { i64, i1 } [[TMP2974]], 0 +// CHECK-NEXT: store volatile i64 [[TMP2975]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2976:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2977:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2978:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2976]], i64 [[TMP2977]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP2979:%.*]] = extractvalue { i64, i1 } [[TMP2978]], 0 +// CHECK-NEXT: store volatile i64 [[TMP2979]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2980:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2981:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2980]] acquire, align 8 +// CHECK-NEXT: [[TMP2982:%.*]] = icmp ugt i64 [[TMP2981]], [[TMP2980]] +// CHECK-NEXT: [[TMP2983:%.*]] = select i1 [[TMP2982]], i64 [[TMP2980]], i64 [[TMP2981]] +// CHECK-NEXT: store volatile i64 [[TMP2983]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2984:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2985:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2984]] acquire, align 8 +// CHECK-NEXT: [[TMP2986:%.*]] = icmp ult i64 [[TMP2985]], [[TMP2984]] +// CHECK-NEXT: [[TMP2987:%.*]] = select i1 [[TMP2986]], i64 [[TMP2984]], i64 [[TMP2985]] +// CHECK-NEXT: store volatile i64 [[TMP2987]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2988:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2989:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP2988]] acquire, align 8 +// CHECK-NEXT: [[TMP2990:%.*]] = icmp ult i64 [[TMP2989]], [[TMP2988]] +// CHECK-NEXT: [[TMP2991:%.*]] = select i1 [[TMP2990]], i64 [[TMP2988]], i64 [[TMP2989]] +// 
CHECK-NEXT: store volatile i64 [[TMP2991]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2992:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2993:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP2992]] acquire, align 8 +// CHECK-NEXT: [[TMP2994:%.*]] = icmp ugt i64 [[TMP2993]], [[TMP2992]] +// CHECK-NEXT: [[TMP2995:%.*]] = select i1 [[TMP2994]], i64 [[TMP2992]], i64 [[TMP2993]] +// CHECK-NEXT: store volatile i64 [[TMP2995]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP2996:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP2997:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP2998:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP2996]], i64 [[TMP2997]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP2999:%.*]] = extractvalue { i64, i1 } [[TMP2998]], 0 +// CHECK-NEXT: [[TMP3000:%.*]] = extractvalue { i64, i1 } [[TMP2998]], 1 +// CHECK-NEXT: [[TMP3001:%.*]] = select i1 [[TMP3000]], i64 [[TMP2996]], i64 [[TMP2999]] +// CHECK-NEXT: store volatile i64 [[TMP3001]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3002:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3003:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3004:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3002]], i64 [[TMP3003]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3005:%.*]] = extractvalue { i64, i1 } [[TMP3004]], 0 +// CHECK-NEXT: [[TMP3006:%.*]] = extractvalue { i64, i1 } [[TMP3004]], 1 +// CHECK-NEXT: [[TMP3007:%.*]] = select i1 [[TMP3006]], i64 [[TMP3002]], i64 [[TMP3005]] +// CHECK-NEXT: store volatile i64 [[TMP3007]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3008:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3009:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3010:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3008]], i64 [[TMP3009]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3011:%.*]] = extractvalue { i64, i1 } [[TMP3010]], 0 +// CHECK-NEXT: [[TMP3012:%.*]] = extractvalue { i64, i1 } [[TMP3010]], 1 +// CHECK-NEXT: br i1 [[TMP3012]], label [[LX_ATOMIC_EXIT291:%.*]], label [[LX_ATOMIC_CONT292:%.*]] +// CHECK: lx.atomic.cont292: +// CHECK-NEXT: store i64 [[TMP3011]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT291]] +// CHECK: lx.atomic.exit291: +// CHECK-NEXT: [[TMP3013:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3014:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3015:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3013]], i64 [[TMP3014]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3016:%.*]] = extractvalue { i64, i1 } [[TMP3015]], 0 +// CHECK-NEXT: [[TMP3017:%.*]] = extractvalue { i64, i1 } [[TMP3015]], 1 +// CHECK-NEXT: br i1 [[TMP3017]], label [[LX_ATOMIC_EXIT293:%.*]], label [[LX_ATOMIC_CONT294:%.*]] +// CHECK: lx.atomic.cont294: +// CHECK-NEXT: store i64 [[TMP3016]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT293]] +// CHECK: lx.atomic.exit293: +// CHECK-NEXT: [[TMP3018:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3019:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3020:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3018]], i64 [[TMP3019]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3021:%.*]] = extractvalue { i64, i1 } [[TMP3020]], 1 +// CHECK-NEXT: [[TMP3022:%.*]] = zext i1 [[TMP3021]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3022]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3023:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3024:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3025:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3023]], i64 [[TMP3024]] acquire acquire, 
align 8 +// CHECK-NEXT: [[TMP3026:%.*]] = extractvalue { i64, i1 } [[TMP3025]], 1 +// CHECK-NEXT: [[TMP3027:%.*]] = zext i1 [[TMP3026]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3027]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3028:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3029:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3030:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3028]], i64 [[TMP3029]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3031:%.*]] = extractvalue { i64, i1 } [[TMP3030]], 0 +// CHECK-NEXT: [[TMP3032:%.*]] = extractvalue { i64, i1 } [[TMP3030]], 1 +// CHECK-NEXT: br i1 [[TMP3032]], label [[LX_ATOMIC_EXIT295:%.*]], label [[LX_ATOMIC_CONT296:%.*]] +// CHECK: lx.atomic.cont296: +// CHECK-NEXT: store i64 [[TMP3031]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT295]] +// CHECK: lx.atomic.exit295: +// CHECK-NEXT: [[TMP3033:%.*]] = extractvalue { i64, i1 } [[TMP3030]], 1 +// CHECK-NEXT: [[TMP3034:%.*]] = zext i1 [[TMP3033]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3034]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3035:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3036:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3037:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3035]], i64 [[TMP3036]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3038:%.*]] = extractvalue { i64, i1 } [[TMP3037]], 0 +// CHECK-NEXT: [[TMP3039:%.*]] = extractvalue { i64, i1 } [[TMP3037]], 1 +// CHECK-NEXT: br i1 [[TMP3039]], label [[LX_ATOMIC_EXIT297:%.*]], label [[LX_ATOMIC_CONT298:%.*]] +// CHECK: lx.atomic.cont298: +// CHECK-NEXT: store i64 [[TMP3038]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT297]] +// CHECK: lx.atomic.exit297: +// CHECK-NEXT: [[TMP3040:%.*]] = extractvalue { i64, i1 } [[TMP3037]], 1 +// CHECK-NEXT: [[TMP3041:%.*]] = zext i1 [[TMP3040]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3041]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3042:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3043:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3042]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3043]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3044:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3045:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3044]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3045]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3046:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3047:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3046]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3047]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3048:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3049:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3048]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3049]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3050:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3051:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3052:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3050]], i64 [[TMP3051]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3053:%.*]] = extractvalue { i64, i1 } [[TMP3052]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3053]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3054:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3055:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3056:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3054]], i64 [[TMP3055]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3057:%.*]] = extractvalue { i64, i1 
} [[TMP3056]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3057]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3058:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3059:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3058]] monotonic, align 8 +// CHECK-NEXT: [[TMP3060:%.*]] = icmp ugt i64 [[TMP3059]], [[TMP3058]] +// CHECK-NEXT: [[TMP3061:%.*]] = select i1 [[TMP3060]], i64 [[TMP3058]], i64 [[TMP3059]] +// CHECK-NEXT: store volatile i64 [[TMP3061]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3062:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3063:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3062]] monotonic, align 8 +// CHECK-NEXT: [[TMP3064:%.*]] = icmp ult i64 [[TMP3063]], [[TMP3062]] +// CHECK-NEXT: [[TMP3065:%.*]] = select i1 [[TMP3064]], i64 [[TMP3062]], i64 [[TMP3063]] +// CHECK-NEXT: store volatile i64 [[TMP3065]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3066:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3067:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3066]] monotonic, align 8 +// CHECK-NEXT: [[TMP3068:%.*]] = icmp ult i64 [[TMP3067]], [[TMP3066]] +// CHECK-NEXT: [[TMP3069:%.*]] = select i1 [[TMP3068]], i64 [[TMP3066]], i64 [[TMP3067]] +// CHECK-NEXT: store volatile i64 [[TMP3069]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3070:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3071:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3070]] monotonic, align 8 +// CHECK-NEXT: [[TMP3072:%.*]] = icmp ugt i64 [[TMP3071]], [[TMP3070]] +// CHECK-NEXT: [[TMP3073:%.*]] = select i1 [[TMP3072]], i64 [[TMP3070]], i64 [[TMP3071]] +// CHECK-NEXT: store volatile i64 [[TMP3073]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3074:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3075:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3076:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3074]], i64 [[TMP3075]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3077:%.*]] = extractvalue { i64, i1 } [[TMP3076]], 0 +// CHECK-NEXT: [[TMP3078:%.*]] = extractvalue { i64, i1 } [[TMP3076]], 1 +// CHECK-NEXT: [[TMP3079:%.*]] = select i1 [[TMP3078]], i64 [[TMP3074]], i64 [[TMP3077]] +// CHECK-NEXT: store volatile i64 [[TMP3079]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3080:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3081:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3082:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3080]], i64 [[TMP3081]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3083:%.*]] = extractvalue { i64, i1 } [[TMP3082]], 0 +// CHECK-NEXT: [[TMP3084:%.*]] = extractvalue { i64, i1 } [[TMP3082]], 1 +// CHECK-NEXT: [[TMP3085:%.*]] = select i1 [[TMP3084]], i64 [[TMP3080]], i64 [[TMP3083]] +// CHECK-NEXT: store volatile i64 [[TMP3085]], i64* [[LV]], align 8 +// CHECK-NEXT: [[TMP3086:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3087:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3088:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3086]], i64 [[TMP3087]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3089:%.*]] = extractvalue { i64, i1 } [[TMP3088]], 0 +// CHECK-NEXT: [[TMP3090:%.*]] = extractvalue { i64, i1 } [[TMP3088]], 1 +// CHECK-NEXT: br i1 [[TMP3090]], label [[LX_ATOMIC_EXIT299:%.*]], label [[LX_ATOMIC_CONT300:%.*]] +// CHECK: lx.atomic.cont300: +// CHECK-NEXT: store i64 [[TMP3089]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT299]] +// CHECK: lx.atomic.exit299: +// CHECK-NEXT: [[TMP3091:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3092:%.*]] = load i64, i64* [[LD]], 
align 8 +// CHECK-NEXT: [[TMP3093:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3091]], i64 [[TMP3092]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3094:%.*]] = extractvalue { i64, i1 } [[TMP3093]], 0 +// CHECK-NEXT: [[TMP3095:%.*]] = extractvalue { i64, i1 } [[TMP3093]], 1 +// CHECK-NEXT: br i1 [[TMP3095]], label [[LX_ATOMIC_EXIT301:%.*]], label [[LX_ATOMIC_CONT302:%.*]] +// CHECK: lx.atomic.cont302: +// CHECK-NEXT: store i64 [[TMP3094]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT301]] +// CHECK: lx.atomic.exit301: +// CHECK-NEXT: [[TMP3096:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3097:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3098:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3096]], i64 [[TMP3097]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3099:%.*]] = extractvalue { i64, i1 } [[TMP3098]], 1 +// CHECK-NEXT: [[TMP3100:%.*]] = zext i1 [[TMP3099]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3100]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3101:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3102:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3103:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3101]], i64 [[TMP3102]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3104:%.*]] = extractvalue { i64, i1 } [[TMP3103]], 1 +// CHECK-NEXT: [[TMP3105:%.*]] = zext i1 [[TMP3104]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3105]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3106:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3107:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3108:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3106]], i64 [[TMP3107]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3109:%.*]] = extractvalue { i64, i1 } [[TMP3108]], 0 +// CHECK-NEXT: [[TMP3110:%.*]] = extractvalue { i64, i1 } [[TMP3108]], 1 +// CHECK-NEXT: br i1 [[TMP3110]], label [[LX_ATOMIC_EXIT303:%.*]], label [[LX_ATOMIC_CONT304:%.*]] +// CHECK: lx.atomic.cont304: +// CHECK-NEXT: store i64 [[TMP3109]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT303]] +// CHECK: lx.atomic.exit303: +// CHECK-NEXT: [[TMP3111:%.*]] = extractvalue { i64, i1 } [[TMP3108]], 1 +// CHECK-NEXT: [[TMP3112:%.*]] = zext i1 [[TMP3111]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3112]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3113:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3114:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3115:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3113]], i64 [[TMP3114]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3116:%.*]] = extractvalue { i64, i1 } [[TMP3115]], 0 +// CHECK-NEXT: [[TMP3117:%.*]] = extractvalue { i64, i1 } [[TMP3115]], 1 +// CHECK-NEXT: br i1 [[TMP3117]], label [[LX_ATOMIC_EXIT305:%.*]], label [[LX_ATOMIC_CONT306:%.*]] +// CHECK: lx.atomic.cont306: +// CHECK-NEXT: store i64 [[TMP3116]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT305]] +// CHECK: lx.atomic.exit305: +// CHECK-NEXT: [[TMP3118:%.*]] = extractvalue { i64, i1 } [[TMP3115]], 1 +// CHECK-NEXT: [[TMP3119:%.*]] = zext i1 [[TMP3118]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3119]], i64* [[LR]], align 8 +// CHECK-NEXT: [[TMP3120:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3121:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3120]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3121]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3122:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: 
[[TMP3123:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3122]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3123]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3124:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3125:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3124]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3125]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3126:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3127:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3126]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3127]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3128:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3129:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3130:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3128]], i64 [[TMP3129]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3131:%.*]] = extractvalue { i64, i1 } [[TMP3130]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3131]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3132:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3133:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3134:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3132]], i64 [[TMP3133]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3135:%.*]] = extractvalue { i64, i1 } [[TMP3134]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3135]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3136:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3137:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3136]] release, align 8 +// CHECK-NEXT: [[TMP3138:%.*]] = icmp ugt i64 [[TMP3137]], [[TMP3136]] +// CHECK-NEXT: [[TMP3139:%.*]] = select i1 [[TMP3138]], i64 [[TMP3136]], i64 [[TMP3137]] +// CHECK-NEXT: store volatile i64 [[TMP3139]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3140:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3141:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3140]] release, align 8 +// CHECK-NEXT: [[TMP3142:%.*]] = icmp ult i64 [[TMP3141]], [[TMP3140]] +// CHECK-NEXT: [[TMP3143:%.*]] = select i1 [[TMP3142]], i64 [[TMP3140]], i64 [[TMP3141]] +// CHECK-NEXT: store volatile i64 [[TMP3143]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3144:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3145:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3144]] release, align 8 +// CHECK-NEXT: [[TMP3146:%.*]] = icmp ult i64 [[TMP3145]], [[TMP3144]] +// CHECK-NEXT: [[TMP3147:%.*]] = select i1 [[TMP3146]], i64 [[TMP3144]], i64 [[TMP3145]] +// CHECK-NEXT: store volatile i64 [[TMP3147]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3148:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3149:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3148]] release, align 8 +// CHECK-NEXT: [[TMP3150:%.*]] = icmp ugt i64 [[TMP3149]], [[TMP3148]] +// CHECK-NEXT: [[TMP3151:%.*]] = select i1 [[TMP3150]], i64 [[TMP3148]], i64 [[TMP3149]] +// CHECK-NEXT: store volatile i64 [[TMP3151]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* 
@[[GLOB1]]) +// CHECK-NEXT: [[TMP3152:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3153:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3154:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3152]], i64 [[TMP3153]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3155:%.*]] = extractvalue { i64, i1 } [[TMP3154]], 0 +// CHECK-NEXT: [[TMP3156:%.*]] = extractvalue { i64, i1 } [[TMP3154]], 1 +// CHECK-NEXT: [[TMP3157:%.*]] = select i1 [[TMP3156]], i64 [[TMP3152]], i64 [[TMP3155]] +// CHECK-NEXT: store volatile i64 [[TMP3157]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3158:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3159:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3160:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3158]], i64 [[TMP3159]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3161:%.*]] = extractvalue { i64, i1 } [[TMP3160]], 0 +// CHECK-NEXT: [[TMP3162:%.*]] = extractvalue { i64, i1 } [[TMP3160]], 1 +// CHECK-NEXT: [[TMP3163:%.*]] = select i1 [[TMP3162]], i64 [[TMP3158]], i64 [[TMP3161]] +// CHECK-NEXT: store volatile i64 [[TMP3163]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3164:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3165:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3166:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3164]], i64 [[TMP3165]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3167:%.*]] = extractvalue { i64, i1 } [[TMP3166]], 0 +// CHECK-NEXT: [[TMP3168:%.*]] = extractvalue { i64, i1 } [[TMP3166]], 1 +// CHECK-NEXT: br i1 [[TMP3168]], label [[LX_ATOMIC_EXIT307:%.*]], label [[LX_ATOMIC_CONT308:%.*]] +// CHECK: lx.atomic.cont308: +// CHECK-NEXT: store i64 [[TMP3167]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT307]] +// CHECK: lx.atomic.exit307: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3169:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3170:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3171:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3169]], i64 [[TMP3170]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3172:%.*]] = extractvalue { i64, i1 } [[TMP3171]], 0 +// CHECK-NEXT: [[TMP3173:%.*]] = extractvalue { i64, i1 } [[TMP3171]], 1 +// CHECK-NEXT: br i1 [[TMP3173]], label [[LX_ATOMIC_EXIT309:%.*]], label [[LX_ATOMIC_CONT310:%.*]] +// CHECK: lx.atomic.cont310: +// CHECK-NEXT: store i64 [[TMP3172]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT309]] +// CHECK: lx.atomic.exit309: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3174:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3175:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3176:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3174]], i64 [[TMP3175]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3177:%.*]] = extractvalue { i64, i1 } [[TMP3176]], 1 +// CHECK-NEXT: [[TMP3178:%.*]] = zext i1 [[TMP3177]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3178]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3179:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3180:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3181:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3179]], i64 [[TMP3180]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3182:%.*]] = extractvalue { i64, i1 } [[TMP3181]], 1 +// 
CHECK-NEXT: [[TMP3183:%.*]] = zext i1 [[TMP3182]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3183]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3184:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3185:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3186:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3184]], i64 [[TMP3185]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3187:%.*]] = extractvalue { i64, i1 } [[TMP3186]], 0 +// CHECK-NEXT: [[TMP3188:%.*]] = extractvalue { i64, i1 } [[TMP3186]], 1 +// CHECK-NEXT: br i1 [[TMP3188]], label [[LX_ATOMIC_EXIT311:%.*]], label [[LX_ATOMIC_CONT312:%.*]] +// CHECK: lx.atomic.cont312: +// CHECK-NEXT: store i64 [[TMP3187]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT311]] +// CHECK: lx.atomic.exit311: +// CHECK-NEXT: [[TMP3189:%.*]] = extractvalue { i64, i1 } [[TMP3186]], 1 +// CHECK-NEXT: [[TMP3190:%.*]] = zext i1 [[TMP3189]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3190]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3191:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3192:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3193:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3191]], i64 [[TMP3192]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3194:%.*]] = extractvalue { i64, i1 } [[TMP3193]], 0 +// CHECK-NEXT: [[TMP3195:%.*]] = extractvalue { i64, i1 } [[TMP3193]], 1 +// CHECK-NEXT: br i1 [[TMP3195]], label [[LX_ATOMIC_EXIT313:%.*]], label [[LX_ATOMIC_CONT314:%.*]] +// CHECK: lx.atomic.cont314: +// CHECK-NEXT: store i64 [[TMP3194]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT313]] +// CHECK: lx.atomic.exit313: +// CHECK-NEXT: [[TMP3196:%.*]] = extractvalue { i64, i1 } [[TMP3193]], 1 +// CHECK-NEXT: [[TMP3197:%.*]] = zext i1 [[TMP3196]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3197]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3198:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3199:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3198]] seq_cst, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3199]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3200:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3201:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3200]] seq_cst, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3201]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3202:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3203:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3202]] seq_cst, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3203]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3204:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3205:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3204]] seq_cst, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3205]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3206:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3207:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3208:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3206]], i64 [[TMP3207]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3209:%.*]] = extractvalue { i64, i1 } 
[[TMP3208]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3209]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3210:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3211:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3212:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3210]], i64 [[TMP3211]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3213:%.*]] = extractvalue { i64, i1 } [[TMP3212]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3213]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3214:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3215:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3214]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3216:%.*]] = icmp ugt i64 [[TMP3215]], [[TMP3214]] +// CHECK-NEXT: [[TMP3217:%.*]] = select i1 [[TMP3216]], i64 [[TMP3214]], i64 [[TMP3215]] +// CHECK-NEXT: store volatile i64 [[TMP3217]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3218:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3219:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3218]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3220:%.*]] = icmp ult i64 [[TMP3219]], [[TMP3218]] +// CHECK-NEXT: [[TMP3221:%.*]] = select i1 [[TMP3220]], i64 [[TMP3218]], i64 [[TMP3219]] +// CHECK-NEXT: store volatile i64 [[TMP3221]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3222:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3223:%.*]] = atomicrmw umin i64* [[LX]], i64 [[TMP3222]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3224:%.*]] = icmp ult i64 [[TMP3223]], [[TMP3222]] +// CHECK-NEXT: [[TMP3225:%.*]] = select i1 [[TMP3224]], i64 [[TMP3222]], i64 [[TMP3223]] +// CHECK-NEXT: store volatile i64 [[TMP3225]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3226:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3227:%.*]] = atomicrmw umax i64* [[LX]], i64 [[TMP3226]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3228:%.*]] = icmp ugt i64 [[TMP3227]], [[TMP3226]] +// CHECK-NEXT: [[TMP3229:%.*]] = select i1 [[TMP3228]], i64 [[TMP3226]], i64 [[TMP3227]] +// CHECK-NEXT: store volatile i64 [[TMP3229]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3230:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3231:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3232:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3230]], i64 [[TMP3231]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3233:%.*]] = extractvalue { i64, i1 } [[TMP3232]], 0 +// CHECK-NEXT: [[TMP3234:%.*]] = extractvalue { i64, i1 } [[TMP3232]], 1 +// CHECK-NEXT: [[TMP3235:%.*]] = select i1 [[TMP3234]], i64 [[TMP3230]], i64 [[TMP3233]] +// CHECK-NEXT: store volatile i64 [[TMP3235]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3236:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3237:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3238:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3236]], i64 [[TMP3237]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3239:%.*]] = extractvalue { i64, i1 } [[TMP3238]], 0 +// CHECK-NEXT: [[TMP3240:%.*]] = extractvalue { i64, i1 } [[TMP3238]], 1 +// CHECK-NEXT: [[TMP3241:%.*]] = select i1 [[TMP3240]], i64 [[TMP3236]], i64 [[TMP3239]] +// 
CHECK-NEXT: store volatile i64 [[TMP3241]], i64* [[LV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3242:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3243:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3244:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3242]], i64 [[TMP3243]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3245:%.*]] = extractvalue { i64, i1 } [[TMP3244]], 0 +// CHECK-NEXT: [[TMP3246:%.*]] = extractvalue { i64, i1 } [[TMP3244]], 1 +// CHECK-NEXT: br i1 [[TMP3246]], label [[LX_ATOMIC_EXIT315:%.*]], label [[LX_ATOMIC_CONT316:%.*]] +// CHECK: lx.atomic.cont316: +// CHECK-NEXT: store i64 [[TMP3245]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT315]] +// CHECK: lx.atomic.exit315: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3247:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3248:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3249:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3247]], i64 [[TMP3248]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3250:%.*]] = extractvalue { i64, i1 } [[TMP3249]], 0 +// CHECK-NEXT: [[TMP3251:%.*]] = extractvalue { i64, i1 } [[TMP3249]], 1 +// CHECK-NEXT: br i1 [[TMP3251]], label [[LX_ATOMIC_EXIT317:%.*]], label [[LX_ATOMIC_CONT318:%.*]] +// CHECK: lx.atomic.cont318: +// CHECK-NEXT: store i64 [[TMP3250]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT317]] +// CHECK: lx.atomic.exit317: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3252:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3253:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3254:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3252]], i64 [[TMP3253]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3255:%.*]] = extractvalue { i64, i1 } [[TMP3254]], 1 +// CHECK-NEXT: [[TMP3256:%.*]] = zext i1 [[TMP3255]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3256]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3257:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3258:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3259:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3257]], i64 [[TMP3258]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3260:%.*]] = extractvalue { i64, i1 } [[TMP3259]], 1 +// CHECK-NEXT: [[TMP3261:%.*]] = zext i1 [[TMP3260]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3261]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3262:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3263:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3264:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3262]], i64 [[TMP3263]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3265:%.*]] = extractvalue { i64, i1 } [[TMP3264]], 0 +// CHECK-NEXT: [[TMP3266:%.*]] = extractvalue { i64, i1 } [[TMP3264]], 1 +// CHECK-NEXT: br i1 [[TMP3266]], label [[LX_ATOMIC_EXIT319:%.*]], label [[LX_ATOMIC_CONT320:%.*]] +// CHECK: lx.atomic.cont320: +// CHECK-NEXT: store i64 [[TMP3265]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT319]] +// CHECK: lx.atomic.exit319: +// CHECK-NEXT: [[TMP3267:%.*]] = extractvalue { i64, i1 } [[TMP3264]], 1 +// CHECK-NEXT: [[TMP3268:%.*]] = zext i1 [[TMP3267]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3268]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// 
CHECK-NEXT: [[TMP3269:%.*]] = load i64, i64* [[LE]], align 8 +// CHECK-NEXT: [[TMP3270:%.*]] = load i64, i64* [[LD]], align 8 +// CHECK-NEXT: [[TMP3271:%.*]] = cmpxchg i64* [[LX]], i64 [[TMP3269]], i64 [[TMP3270]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3272:%.*]] = extractvalue { i64, i1 } [[TMP3271]], 0 +// CHECK-NEXT: [[TMP3273:%.*]] = extractvalue { i64, i1 } [[TMP3271]], 1 +// CHECK-NEXT: br i1 [[TMP3273]], label [[LX_ATOMIC_EXIT321:%.*]], label [[LX_ATOMIC_CONT322:%.*]] +// CHECK: lx.atomic.cont322: +// CHECK-NEXT: store i64 [[TMP3272]], i64* [[LV]], align 8 +// CHECK-NEXT: br label [[LX_ATOMIC_EXIT321]] +// CHECK: lx.atomic.exit321: +// CHECK-NEXT: [[TMP3274:%.*]] = extractvalue { i64, i1 } [[TMP3271]], 1 +// CHECK-NEXT: [[TMP3275:%.*]] = zext i1 [[TMP3274]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3275]], i64* [[LR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3276:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3277:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3276]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3277]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3278:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3279:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3278]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3279]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3280:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3281:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3280]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3281]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3282:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3283:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3282]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3283]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3284:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3285:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3286:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3284]], i64 [[TMP3285]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3287:%.*]] = extractvalue { i64, i1 } [[TMP3286]], 0 +// CHECK-NEXT: store i64 [[TMP3287]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3288:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3289:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3290:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3288]], i64 [[TMP3289]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3291:%.*]] = extractvalue { i64, i1 } [[TMP3290]], 0 +// CHECK-NEXT: store i64 [[TMP3291]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3292:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3293:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3292]] monotonic, align 8 +// CHECK-NEXT: [[TMP3294:%.*]] = icmp ugt i64 [[TMP3293]], [[TMP3292]] +// CHECK-NEXT: [[TMP3295:%.*]] = select i1 [[TMP3294]], i64 [[TMP3292]], i64 [[TMP3293]] +// CHECK-NEXT: store i64 [[TMP3295]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3296:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3297:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3296]] monotonic, align 8 +// CHECK-NEXT: [[TMP3298:%.*]] = icmp ult i64 [[TMP3297]], [[TMP3296]] +// CHECK-NEXT: [[TMP3299:%.*]] = select i1 [[TMP3298]], i64 [[TMP3296]], i64 [[TMP3297]] +// CHECK-NEXT: store i64 [[TMP3299]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3300:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3301:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3300]] monotonic, align 8 +// CHECK-NEXT: 
[[TMP3302:%.*]] = icmp ult i64 [[TMP3301]], [[TMP3300]] +// CHECK-NEXT: [[TMP3303:%.*]] = select i1 [[TMP3302]], i64 [[TMP3300]], i64 [[TMP3301]] +// CHECK-NEXT: store i64 [[TMP3303]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3304:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3305:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3304]] monotonic, align 8 +// CHECK-NEXT: [[TMP3306:%.*]] = icmp ugt i64 [[TMP3305]], [[TMP3304]] +// CHECK-NEXT: [[TMP3307:%.*]] = select i1 [[TMP3306]], i64 [[TMP3304]], i64 [[TMP3305]] +// CHECK-NEXT: store i64 [[TMP3307]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3308:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3309:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3310:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3308]], i64 [[TMP3309]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3311:%.*]] = extractvalue { i64, i1 } [[TMP3310]], 0 +// CHECK-NEXT: [[TMP3312:%.*]] = extractvalue { i64, i1 } [[TMP3310]], 1 +// CHECK-NEXT: [[TMP3313:%.*]] = select i1 [[TMP3312]], i64 [[TMP3308]], i64 [[TMP3311]] +// CHECK-NEXT: store i64 [[TMP3313]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3314:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3315:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3316:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3314]], i64 [[TMP3315]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3317:%.*]] = extractvalue { i64, i1 } [[TMP3316]], 0 +// CHECK-NEXT: [[TMP3318:%.*]] = extractvalue { i64, i1 } [[TMP3316]], 1 +// CHECK-NEXT: [[TMP3319:%.*]] = select i1 [[TMP3318]], i64 [[TMP3314]], i64 [[TMP3317]] +// CHECK-NEXT: store i64 [[TMP3319]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3320:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3321:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3322:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3320]], i64 [[TMP3321]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3323:%.*]] = extractvalue { i64, i1 } [[TMP3322]], 0 +// CHECK-NEXT: [[TMP3324:%.*]] = extractvalue { i64, i1 } [[TMP3322]], 1 +// CHECK-NEXT: br i1 [[TMP3324]], label [[ULX_ATOMIC_EXIT:%.*]], label [[ULX_ATOMIC_CONT:%.*]] +// CHECK: ulx.atomic.cont: +// CHECK-NEXT: store i64 [[TMP3323]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT]] +// CHECK: ulx.atomic.exit: +// CHECK-NEXT: [[TMP3325:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3326:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3327:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3325]], i64 [[TMP3326]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3328:%.*]] = extractvalue { i64, i1 } [[TMP3327]], 0 +// CHECK-NEXT: [[TMP3329:%.*]] = extractvalue { i64, i1 } [[TMP3327]], 1 +// CHECK-NEXT: br i1 [[TMP3329]], label [[ULX_ATOMIC_EXIT323:%.*]], label [[ULX_ATOMIC_CONT324:%.*]] +// CHECK: ulx.atomic.cont324: +// CHECK-NEXT: store i64 [[TMP3328]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT323]] +// CHECK: ulx.atomic.exit323: +// CHECK-NEXT: [[TMP3330:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3331:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3332:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3330]], i64 [[TMP3331]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3333:%.*]] = extractvalue { i64, i1 } [[TMP3332]], 1 +// CHECK-NEXT: [[TMP3334:%.*]] = zext i1 [[TMP3333]] to i64 +// CHECK-NEXT: store i64 [[TMP3334]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3335:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: 
[[TMP3336:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3337:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3335]], i64 [[TMP3336]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3338:%.*]] = extractvalue { i64, i1 } [[TMP3337]], 1 +// CHECK-NEXT: [[TMP3339:%.*]] = zext i1 [[TMP3338]] to i64 +// CHECK-NEXT: store i64 [[TMP3339]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3340:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3341:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3342:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3340]], i64 [[TMP3341]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3343:%.*]] = extractvalue { i64, i1 } [[TMP3342]], 0 +// CHECK-NEXT: [[TMP3344:%.*]] = extractvalue { i64, i1 } [[TMP3342]], 1 +// CHECK-NEXT: br i1 [[TMP3344]], label [[ULX_ATOMIC_EXIT325:%.*]], label [[ULX_ATOMIC_CONT326:%.*]] +// CHECK: ulx.atomic.cont326: +// CHECK-NEXT: store i64 [[TMP3343]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT325]] +// CHECK: ulx.atomic.exit325: +// CHECK-NEXT: [[TMP3345:%.*]] = extractvalue { i64, i1 } [[TMP3342]], 1 +// CHECK-NEXT: [[TMP3346:%.*]] = zext i1 [[TMP3345]] to i64 +// CHECK-NEXT: store i64 [[TMP3346]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3347:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3348:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3349:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3347]], i64 [[TMP3348]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3350:%.*]] = extractvalue { i64, i1 } [[TMP3349]], 0 +// CHECK-NEXT: [[TMP3351:%.*]] = extractvalue { i64, i1 } [[TMP3349]], 1 +// CHECK-NEXT: br i1 [[TMP3351]], label [[ULX_ATOMIC_EXIT327:%.*]], label [[ULX_ATOMIC_CONT328:%.*]] +// CHECK: ulx.atomic.cont328: +// CHECK-NEXT: store i64 [[TMP3350]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT327]] +// CHECK: ulx.atomic.exit327: +// CHECK-NEXT: [[TMP3352:%.*]] = extractvalue { i64, i1 } [[TMP3349]], 1 +// CHECK-NEXT: [[TMP3353:%.*]] = zext i1 [[TMP3352]] to i64 +// CHECK-NEXT: store i64 [[TMP3353]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3354:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3355:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3354]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP3355]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3356:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3357:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3356]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP3357]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3358:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3359:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3358]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP3359]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3360:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3361:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3360]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP3361]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3362:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3363:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3364:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3362]], i64 [[TMP3363]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3365:%.*]] = 
extractvalue { i64, i1 } [[TMP3364]], 0 +// CHECK-NEXT: store i64 [[TMP3365]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3366:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3367:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3368:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3366]], i64 [[TMP3367]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3369:%.*]] = extractvalue { i64, i1 } [[TMP3368]], 0 +// CHECK-NEXT: store i64 [[TMP3369]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3370:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3371:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3370]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3372:%.*]] = icmp ugt i64 [[TMP3371]], [[TMP3370]] +// CHECK-NEXT: [[TMP3373:%.*]] = select i1 [[TMP3372]], i64 [[TMP3370]], i64 [[TMP3371]] +// CHECK-NEXT: store i64 [[TMP3373]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3374:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3375:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3374]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3376:%.*]] = icmp ult i64 [[TMP3375]], [[TMP3374]] +// CHECK-NEXT: [[TMP3377:%.*]] = select i1 [[TMP3376]], i64 [[TMP3374]], i64 [[TMP3375]] +// CHECK-NEXT: store i64 [[TMP3377]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3378:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3379:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3378]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3380:%.*]] = icmp ult i64 [[TMP3379]], [[TMP3378]] +// CHECK-NEXT: [[TMP3381:%.*]] = select i1 [[TMP3380]], i64 [[TMP3378]], i64 [[TMP3379]] +// CHECK-NEXT: store i64 [[TMP3381]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3382:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3383:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3382]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3384:%.*]] = icmp ugt i64 [[TMP3383]], [[TMP3382]] +// CHECK-NEXT: [[TMP3385:%.*]] = select i1 [[TMP3384]], i64 [[TMP3382]], i64 [[TMP3383]] +// CHECK-NEXT: store i64 [[TMP3385]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3386:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3387:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3388:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3386]], i64 [[TMP3387]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3389:%.*]] = extractvalue { i64, i1 } [[TMP3388]], 0 +// CHECK-NEXT: [[TMP3390:%.*]] = extractvalue { i64, i1 } [[TMP3388]], 1 +// CHECK-NEXT: [[TMP3391:%.*]] = select i1 [[TMP3390]], i64 [[TMP3386]], i64 [[TMP3389]] +// CHECK-NEXT: store i64 [[TMP3391]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3392:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3393:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3394:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3392]], i64 [[TMP3393]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3395:%.*]] = extractvalue { i64, i1 } [[TMP3394]], 0 +// CHECK-NEXT: [[TMP3396:%.*]] = extractvalue { i64, i1 } [[TMP3394]], 1 +// CHECK-NEXT: [[TMP3397:%.*]] = select i1 [[TMP3396]], i64 [[TMP3392]], i64 [[TMP3395]] +// CHECK-NEXT: 
store i64 [[TMP3397]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3398:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3399:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3400:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3398]], i64 [[TMP3399]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3401:%.*]] = extractvalue { i64, i1 } [[TMP3400]], 0 +// CHECK-NEXT: [[TMP3402:%.*]] = extractvalue { i64, i1 } [[TMP3400]], 1 +// CHECK-NEXT: br i1 [[TMP3402]], label [[ULX_ATOMIC_EXIT329:%.*]], label [[ULX_ATOMIC_CONT330:%.*]] +// CHECK: ulx.atomic.cont330: +// CHECK-NEXT: store i64 [[TMP3401]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT329]] +// CHECK: ulx.atomic.exit329: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3403:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3404:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3405:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3403]], i64 [[TMP3404]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3406:%.*]] = extractvalue { i64, i1 } [[TMP3405]], 0 +// CHECK-NEXT: [[TMP3407:%.*]] = extractvalue { i64, i1 } [[TMP3405]], 1 +// CHECK-NEXT: br i1 [[TMP3407]], label [[ULX_ATOMIC_EXIT331:%.*]], label [[ULX_ATOMIC_CONT332:%.*]] +// CHECK: ulx.atomic.cont332: +// CHECK-NEXT: store i64 [[TMP3406]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT331]] +// CHECK: ulx.atomic.exit331: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3408:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3409:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3410:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3408]], i64 [[TMP3409]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3411:%.*]] = extractvalue { i64, i1 } [[TMP3410]], 1 +// CHECK-NEXT: [[TMP3412:%.*]] = zext i1 [[TMP3411]] to i64 +// CHECK-NEXT: store i64 [[TMP3412]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3413:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3414:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3415:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3413]], i64 [[TMP3414]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3416:%.*]] = extractvalue { i64, i1 } [[TMP3415]], 1 +// CHECK-NEXT: [[TMP3417:%.*]] = zext i1 [[TMP3416]] to i64 +// CHECK-NEXT: store i64 [[TMP3417]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3418:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3419:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3420:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3418]], i64 [[TMP3419]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3421:%.*]] = extractvalue { i64, i1 } [[TMP3420]], 0 +// CHECK-NEXT: [[TMP3422:%.*]] = extractvalue { i64, i1 } [[TMP3420]], 1 +// CHECK-NEXT: br i1 [[TMP3422]], label [[ULX_ATOMIC_EXIT333:%.*]], label [[ULX_ATOMIC_CONT334:%.*]] +// CHECK: ulx.atomic.cont334: +// CHECK-NEXT: store i64 [[TMP3421]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT333]] +// CHECK: ulx.atomic.exit333: +// CHECK-NEXT: [[TMP3423:%.*]] = extractvalue { i64, i1 } [[TMP3420]], 1 +// CHECK-NEXT: [[TMP3424:%.*]] = zext i1 [[TMP3423]] to i64 +// CHECK-NEXT: store i64 [[TMP3424]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// 
CHECK-NEXT: [[TMP3425:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3426:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3427:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3425]], i64 [[TMP3426]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3428:%.*]] = extractvalue { i64, i1 } [[TMP3427]], 0 +// CHECK-NEXT: [[TMP3429:%.*]] = extractvalue { i64, i1 } [[TMP3427]], 1 +// CHECK-NEXT: br i1 [[TMP3429]], label [[ULX_ATOMIC_EXIT335:%.*]], label [[ULX_ATOMIC_CONT336:%.*]] +// CHECK: ulx.atomic.cont336: +// CHECK-NEXT: store i64 [[TMP3428]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT335]] +// CHECK: ulx.atomic.exit335: +// CHECK-NEXT: [[TMP3430:%.*]] = extractvalue { i64, i1 } [[TMP3427]], 1 +// CHECK-NEXT: [[TMP3431:%.*]] = zext i1 [[TMP3430]] to i64 +// CHECK-NEXT: store i64 [[TMP3431]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3432:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3433:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3432]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP3433]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3434:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3435:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3434]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP3435]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3436:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3437:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3436]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP3437]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3438:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3439:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3438]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP3439]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3440:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3441:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3442:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3440]], i64 [[TMP3441]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3443:%.*]] = extractvalue { i64, i1 } [[TMP3442]], 0 +// CHECK-NEXT: store i64 [[TMP3443]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3444:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3445:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3446:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3444]], i64 [[TMP3445]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3447:%.*]] = extractvalue { i64, i1 } [[TMP3446]], 0 +// CHECK-NEXT: store i64 [[TMP3447]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3448:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3449:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3448]] acquire, align 8 +// CHECK-NEXT: [[TMP3450:%.*]] = icmp ugt i64 [[TMP3449]], [[TMP3448]] +// CHECK-NEXT: [[TMP3451:%.*]] = select i1 [[TMP3450]], i64 [[TMP3448]], i64 [[TMP3449]] +// CHECK-NEXT: store i64 [[TMP3451]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3452:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3453:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3452]] acquire, align 8 +// CHECK-NEXT: [[TMP3454:%.*]] = icmp ult i64 [[TMP3453]], [[TMP3452]] +// CHECK-NEXT: [[TMP3455:%.*]] = select i1 [[TMP3454]], i64 [[TMP3452]], i64 [[TMP3453]] +// CHECK-NEXT: store i64 [[TMP3455]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3456:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3457:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3456]] acquire, align 8 +// CHECK-NEXT: [[TMP3458:%.*]] = icmp 
ult i64 [[TMP3457]], [[TMP3456]] +// CHECK-NEXT: [[TMP3459:%.*]] = select i1 [[TMP3458]], i64 [[TMP3456]], i64 [[TMP3457]] +// CHECK-NEXT: store i64 [[TMP3459]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3460:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3461:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3460]] acquire, align 8 +// CHECK-NEXT: [[TMP3462:%.*]] = icmp ugt i64 [[TMP3461]], [[TMP3460]] +// CHECK-NEXT: [[TMP3463:%.*]] = select i1 [[TMP3462]], i64 [[TMP3460]], i64 [[TMP3461]] +// CHECK-NEXT: store i64 [[TMP3463]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3464:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3465:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3466:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3464]], i64 [[TMP3465]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3467:%.*]] = extractvalue { i64, i1 } [[TMP3466]], 0 +// CHECK-NEXT: [[TMP3468:%.*]] = extractvalue { i64, i1 } [[TMP3466]], 1 +// CHECK-NEXT: [[TMP3469:%.*]] = select i1 [[TMP3468]], i64 [[TMP3464]], i64 [[TMP3467]] +// CHECK-NEXT: store i64 [[TMP3469]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3470:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3471:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3472:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3470]], i64 [[TMP3471]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3473:%.*]] = extractvalue { i64, i1 } [[TMP3472]], 0 +// CHECK-NEXT: [[TMP3474:%.*]] = extractvalue { i64, i1 } [[TMP3472]], 1 +// CHECK-NEXT: [[TMP3475:%.*]] = select i1 [[TMP3474]], i64 [[TMP3470]], i64 [[TMP3473]] +// CHECK-NEXT: store i64 [[TMP3475]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3476:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3477:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3478:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3476]], i64 [[TMP3477]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3479:%.*]] = extractvalue { i64, i1 } [[TMP3478]], 0 +// CHECK-NEXT: [[TMP3480:%.*]] = extractvalue { i64, i1 } [[TMP3478]], 1 +// CHECK-NEXT: br i1 [[TMP3480]], label [[ULX_ATOMIC_EXIT337:%.*]], label [[ULX_ATOMIC_CONT338:%.*]] +// CHECK: ulx.atomic.cont338: +// CHECK-NEXT: store i64 [[TMP3479]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT337]] +// CHECK: ulx.atomic.exit337: +// CHECK-NEXT: [[TMP3481:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3482:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3483:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3481]], i64 [[TMP3482]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3484:%.*]] = extractvalue { i64, i1 } [[TMP3483]], 0 +// CHECK-NEXT: [[TMP3485:%.*]] = extractvalue { i64, i1 } [[TMP3483]], 1 +// CHECK-NEXT: br i1 [[TMP3485]], label [[ULX_ATOMIC_EXIT339:%.*]], label [[ULX_ATOMIC_CONT340:%.*]] +// CHECK: ulx.atomic.cont340: +// CHECK-NEXT: store i64 [[TMP3484]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT339]] +// CHECK: ulx.atomic.exit339: +// CHECK-NEXT: [[TMP3486:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3487:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3488:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3486]], i64 [[TMP3487]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3489:%.*]] = extractvalue { i64, i1 } [[TMP3488]], 1 +// CHECK-NEXT: [[TMP3490:%.*]] = zext i1 [[TMP3489]] to i64 +// CHECK-NEXT: store i64 [[TMP3490]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3491:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3492:%.*]] = load i64, i64* 
[[ULD]], align 8 +// CHECK-NEXT: [[TMP3493:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3491]], i64 [[TMP3492]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3494:%.*]] = extractvalue { i64, i1 } [[TMP3493]], 1 +// CHECK-NEXT: [[TMP3495:%.*]] = zext i1 [[TMP3494]] to i64 +// CHECK-NEXT: store i64 [[TMP3495]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3496:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3497:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3498:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3496]], i64 [[TMP3497]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3499:%.*]] = extractvalue { i64, i1 } [[TMP3498]], 0 +// CHECK-NEXT: [[TMP3500:%.*]] = extractvalue { i64, i1 } [[TMP3498]], 1 +// CHECK-NEXT: br i1 [[TMP3500]], label [[ULX_ATOMIC_EXIT341:%.*]], label [[ULX_ATOMIC_CONT342:%.*]] +// CHECK: ulx.atomic.cont342: +// CHECK-NEXT: store i64 [[TMP3499]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT341]] +// CHECK: ulx.atomic.exit341: +// CHECK-NEXT: [[TMP3501:%.*]] = extractvalue { i64, i1 } [[TMP3498]], 1 +// CHECK-NEXT: [[TMP3502:%.*]] = zext i1 [[TMP3501]] to i64 +// CHECK-NEXT: store i64 [[TMP3502]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3503:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3504:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3505:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3503]], i64 [[TMP3504]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3506:%.*]] = extractvalue { i64, i1 } [[TMP3505]], 0 +// CHECK-NEXT: [[TMP3507:%.*]] = extractvalue { i64, i1 } [[TMP3505]], 1 +// CHECK-NEXT: br i1 [[TMP3507]], label [[ULX_ATOMIC_EXIT343:%.*]], label [[ULX_ATOMIC_CONT344:%.*]] +// CHECK: ulx.atomic.cont344: +// CHECK-NEXT: store i64 [[TMP3506]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT343]] +// CHECK: ulx.atomic.exit343: +// CHECK-NEXT: [[TMP3508:%.*]] = extractvalue { i64, i1 } [[TMP3505]], 1 +// CHECK-NEXT: [[TMP3509:%.*]] = zext i1 [[TMP3508]] to i64 +// CHECK-NEXT: store i64 [[TMP3509]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3510:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3511:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3510]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3511]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3512:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3513:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3512]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3513]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3514:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3515:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3514]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3515]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3516:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3517:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3516]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP3517]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3518:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3519:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3520:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3518]], i64 [[TMP3519]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3521:%.*]] = extractvalue { i64, i1 } [[TMP3520]], 0 +// CHECK-NEXT: store i64 [[TMP3521]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3522:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3523:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3524:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3522]], i64 
[[TMP3523]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3525:%.*]] = extractvalue { i64, i1 } [[TMP3524]], 0 +// CHECK-NEXT: store i64 [[TMP3525]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3526:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3527:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3526]] monotonic, align 8 +// CHECK-NEXT: [[TMP3528:%.*]] = icmp ugt i64 [[TMP3527]], [[TMP3526]] +// CHECK-NEXT: [[TMP3529:%.*]] = select i1 [[TMP3528]], i64 [[TMP3526]], i64 [[TMP3527]] +// CHECK-NEXT: store i64 [[TMP3529]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3530:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3531:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3530]] monotonic, align 8 +// CHECK-NEXT: [[TMP3532:%.*]] = icmp ult i64 [[TMP3531]], [[TMP3530]] +// CHECK-NEXT: [[TMP3533:%.*]] = select i1 [[TMP3532]], i64 [[TMP3530]], i64 [[TMP3531]] +// CHECK-NEXT: store i64 [[TMP3533]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3534:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3535:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3534]] monotonic, align 8 +// CHECK-NEXT: [[TMP3536:%.*]] = icmp ult i64 [[TMP3535]], [[TMP3534]] +// CHECK-NEXT: [[TMP3537:%.*]] = select i1 [[TMP3536]], i64 [[TMP3534]], i64 [[TMP3535]] +// CHECK-NEXT: store i64 [[TMP3537]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3538:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3539:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3538]] monotonic, align 8 +// CHECK-NEXT: [[TMP3540:%.*]] = icmp ugt i64 [[TMP3539]], [[TMP3538]] +// CHECK-NEXT: [[TMP3541:%.*]] = select i1 [[TMP3540]], i64 [[TMP3538]], i64 [[TMP3539]] +// CHECK-NEXT: store i64 [[TMP3541]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3542:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3543:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3544:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3542]], i64 [[TMP3543]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3545:%.*]] = extractvalue { i64, i1 } [[TMP3544]], 0 +// CHECK-NEXT: [[TMP3546:%.*]] = extractvalue { i64, i1 } [[TMP3544]], 1 +// CHECK-NEXT: [[TMP3547:%.*]] = select i1 [[TMP3546]], i64 [[TMP3542]], i64 [[TMP3545]] +// CHECK-NEXT: store i64 [[TMP3547]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3548:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3549:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3550:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3548]], i64 [[TMP3549]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3551:%.*]] = extractvalue { i64, i1 } [[TMP3550]], 0 +// CHECK-NEXT: [[TMP3552:%.*]] = extractvalue { i64, i1 } [[TMP3550]], 1 +// CHECK-NEXT: [[TMP3553:%.*]] = select i1 [[TMP3552]], i64 [[TMP3548]], i64 [[TMP3551]] +// CHECK-NEXT: store i64 [[TMP3553]], i64* [[ULV]], align 8 +// CHECK-NEXT: [[TMP3554:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3555:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3556:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3554]], i64 [[TMP3555]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3557:%.*]] = extractvalue { i64, i1 } [[TMP3556]], 0 +// CHECK-NEXT: [[TMP3558:%.*]] = extractvalue { i64, i1 } [[TMP3556]], 1 +// CHECK-NEXT: br i1 [[TMP3558]], label [[ULX_ATOMIC_EXIT345:%.*]], label [[ULX_ATOMIC_CONT346:%.*]] +// CHECK: ulx.atomic.cont346: +// CHECK-NEXT: store i64 [[TMP3557]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT345]] +// CHECK: ulx.atomic.exit345: +// CHECK-NEXT: [[TMP3559:%.*]] = load i64, i64* [[ULE]], 
align 8 +// CHECK-NEXT: [[TMP3560:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3561:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3559]], i64 [[TMP3560]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3562:%.*]] = extractvalue { i64, i1 } [[TMP3561]], 0 +// CHECK-NEXT: [[TMP3563:%.*]] = extractvalue { i64, i1 } [[TMP3561]], 1 +// CHECK-NEXT: br i1 [[TMP3563]], label [[ULX_ATOMIC_EXIT347:%.*]], label [[ULX_ATOMIC_CONT348:%.*]] +// CHECK: ulx.atomic.cont348: +// CHECK-NEXT: store i64 [[TMP3562]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT347]] +// CHECK: ulx.atomic.exit347: +// CHECK-NEXT: [[TMP3564:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3565:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3566:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3564]], i64 [[TMP3565]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3567:%.*]] = extractvalue { i64, i1 } [[TMP3566]], 1 +// CHECK-NEXT: [[TMP3568:%.*]] = zext i1 [[TMP3567]] to i64 +// CHECK-NEXT: store i64 [[TMP3568]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3569:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3570:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3571:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3569]], i64 [[TMP3570]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3572:%.*]] = extractvalue { i64, i1 } [[TMP3571]], 1 +// CHECK-NEXT: [[TMP3573:%.*]] = zext i1 [[TMP3572]] to i64 +// CHECK-NEXT: store i64 [[TMP3573]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3574:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3575:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3576:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3574]], i64 [[TMP3575]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3577:%.*]] = extractvalue { i64, i1 } [[TMP3576]], 0 +// CHECK-NEXT: [[TMP3578:%.*]] = extractvalue { i64, i1 } [[TMP3576]], 1 +// CHECK-NEXT: br i1 [[TMP3578]], label [[ULX_ATOMIC_EXIT349:%.*]], label [[ULX_ATOMIC_CONT350:%.*]] +// CHECK: ulx.atomic.cont350: +// CHECK-NEXT: store i64 [[TMP3577]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT349]] +// CHECK: ulx.atomic.exit349: +// CHECK-NEXT: [[TMP3579:%.*]] = extractvalue { i64, i1 } [[TMP3576]], 1 +// CHECK-NEXT: [[TMP3580:%.*]] = zext i1 [[TMP3579]] to i64 +// CHECK-NEXT: store i64 [[TMP3580]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3581:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3582:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3583:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3581]], i64 [[TMP3582]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3584:%.*]] = extractvalue { i64, i1 } [[TMP3583]], 0 +// CHECK-NEXT: [[TMP3585:%.*]] = extractvalue { i64, i1 } [[TMP3583]], 1 +// CHECK-NEXT: br i1 [[TMP3585]], label [[ULX_ATOMIC_EXIT351:%.*]], label [[ULX_ATOMIC_CONT352:%.*]] +// CHECK: ulx.atomic.cont352: +// CHECK-NEXT: store i64 [[TMP3584]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT351]] +// CHECK: ulx.atomic.exit351: +// CHECK-NEXT: [[TMP3586:%.*]] = extractvalue { i64, i1 } [[TMP3583]], 1 +// CHECK-NEXT: [[TMP3587:%.*]] = zext i1 [[TMP3586]] to i64 +// CHECK-NEXT: store i64 [[TMP3587]], i64* [[ULR]], align 8 +// CHECK-NEXT: [[TMP3588:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3589:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3588]] release, align 8 +// CHECK-NEXT: store i64 [[TMP3589]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: 
[[TMP3590:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3591:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3590]] release, align 8 +// CHECK-NEXT: store i64 [[TMP3591]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3592:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3593:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3592]] release, align 8 +// CHECK-NEXT: store i64 [[TMP3593]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3594:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3595:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3594]] release, align 8 +// CHECK-NEXT: store i64 [[TMP3595]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3596:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3597:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3598:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3596]], i64 [[TMP3597]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3599:%.*]] = extractvalue { i64, i1 } [[TMP3598]], 0 +// CHECK-NEXT: store i64 [[TMP3599]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3600:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3601:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3602:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3600]], i64 [[TMP3601]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3603:%.*]] = extractvalue { i64, i1 } [[TMP3602]], 0 +// CHECK-NEXT: store i64 [[TMP3603]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3604:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3605:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3604]] release, align 8 +// CHECK-NEXT: [[TMP3606:%.*]] = icmp ugt i64 [[TMP3605]], [[TMP3604]] +// CHECK-NEXT: [[TMP3607:%.*]] = select i1 [[TMP3606]], i64 [[TMP3604]], i64 [[TMP3605]] +// CHECK-NEXT: store i64 [[TMP3607]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3608:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3609:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3608]] release, align 8 +// CHECK-NEXT: [[TMP3610:%.*]] = icmp ult i64 [[TMP3609]], [[TMP3608]] +// CHECK-NEXT: [[TMP3611:%.*]] = select i1 [[TMP3610]], i64 [[TMP3608]], i64 [[TMP3609]] +// CHECK-NEXT: store i64 [[TMP3611]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3612:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3613:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3612]] release, align 8 +// CHECK-NEXT: [[TMP3614:%.*]] = icmp ult i64 [[TMP3613]], [[TMP3612]] +// CHECK-NEXT: [[TMP3615:%.*]] = select i1 [[TMP3614]], i64 [[TMP3612]], i64 [[TMP3613]] +// CHECK-NEXT: store i64 [[TMP3615]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3616:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3617:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3616]] release, align 8 +// CHECK-NEXT: [[TMP3618:%.*]] = icmp ugt i64 [[TMP3617]], [[TMP3616]] +// CHECK-NEXT: [[TMP3619:%.*]] = select i1 [[TMP3618]], i64 [[TMP3616]], i64 [[TMP3617]] +// CHECK-NEXT: store i64 [[TMP3619]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3620:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3621:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3622:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3620]], i64 [[TMP3621]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3623:%.*]] = extractvalue { i64, i1 } [[TMP3622]], 0 +// CHECK-NEXT: [[TMP3624:%.*]] = extractvalue { i64, i1 } [[TMP3622]], 1 +// CHECK-NEXT: [[TMP3625:%.*]] = select i1 [[TMP3624]], i64 [[TMP3620]], i64 [[TMP3623]] +// CHECK-NEXT: store i64 [[TMP3625]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3626:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3627:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3628:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3626]], i64 [[TMP3627]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3629:%.*]] = extractvalue { i64, i1 } [[TMP3628]], 0 +// CHECK-NEXT: [[TMP3630:%.*]] = extractvalue { i64, i1 } [[TMP3628]], 1 +// CHECK-NEXT: [[TMP3631:%.*]] = select i1 [[TMP3630]], i64 [[TMP3626]], i64 [[TMP3629]] +// CHECK-NEXT: store i64 [[TMP3631]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3632:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3633:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3634:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3632]], i64 [[TMP3633]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3635:%.*]] = extractvalue { i64, i1 } [[TMP3634]], 0 +// CHECK-NEXT: [[TMP3636:%.*]] = extractvalue { i64, i1 } [[TMP3634]], 1 +// CHECK-NEXT: br i1 [[TMP3636]], label [[ULX_ATOMIC_EXIT353:%.*]], label [[ULX_ATOMIC_CONT354:%.*]] +// CHECK: ulx.atomic.cont354: +// CHECK-NEXT: store i64 [[TMP3635]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT353]] +// CHECK: ulx.atomic.exit353: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3637:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3638:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3639:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3637]], i64 [[TMP3638]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3640:%.*]] = extractvalue { i64, i1 } [[TMP3639]], 0 +// CHECK-NEXT: [[TMP3641:%.*]] = extractvalue { i64, i1 } [[TMP3639]], 1 +// CHECK-NEXT: br i1 [[TMP3641]], label [[ULX_ATOMIC_EXIT355:%.*]], label [[ULX_ATOMIC_CONT356:%.*]] +// CHECK: ulx.atomic.cont356: +// CHECK-NEXT: store i64 [[TMP3640]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT355]] +// CHECK: ulx.atomic.exit355: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3642:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3643:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3644:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3642]], i64 [[TMP3643]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3645:%.*]] = extractvalue { i64, i1 } [[TMP3644]], 1 +// CHECK-NEXT: [[TMP3646:%.*]] = zext i1 [[TMP3645]] to i64 +// CHECK-NEXT: store i64 [[TMP3646]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3647:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3648:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3649:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3647]], i64 [[TMP3648]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3650:%.*]] = 
extractvalue { i64, i1 } [[TMP3649]], 1 +// CHECK-NEXT: [[TMP3651:%.*]] = zext i1 [[TMP3650]] to i64 +// CHECK-NEXT: store i64 [[TMP3651]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3652:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3653:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3654:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3652]], i64 [[TMP3653]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3655:%.*]] = extractvalue { i64, i1 } [[TMP3654]], 0 +// CHECK-NEXT: [[TMP3656:%.*]] = extractvalue { i64, i1 } [[TMP3654]], 1 +// CHECK-NEXT: br i1 [[TMP3656]], label [[ULX_ATOMIC_EXIT357:%.*]], label [[ULX_ATOMIC_CONT358:%.*]] +// CHECK: ulx.atomic.cont358: +// CHECK-NEXT: store i64 [[TMP3655]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT357]] +// CHECK: ulx.atomic.exit357: +// CHECK-NEXT: [[TMP3657:%.*]] = extractvalue { i64, i1 } [[TMP3654]], 1 +// CHECK-NEXT: [[TMP3658:%.*]] = zext i1 [[TMP3657]] to i64 +// CHECK-NEXT: store i64 [[TMP3658]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3659:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3660:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3661:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3659]], i64 [[TMP3660]] release monotonic, align 8 +// CHECK-NEXT: [[TMP3662:%.*]] = extractvalue { i64, i1 } [[TMP3661]], 0 +// CHECK-NEXT: [[TMP3663:%.*]] = extractvalue { i64, i1 } [[TMP3661]], 1 +// CHECK-NEXT: br i1 [[TMP3663]], label [[ULX_ATOMIC_EXIT359:%.*]], label [[ULX_ATOMIC_CONT360:%.*]] +// CHECK: ulx.atomic.cont360: +// CHECK-NEXT: store i64 [[TMP3662]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT359]] +// CHECK: ulx.atomic.exit359: +// CHECK-NEXT: [[TMP3664:%.*]] = extractvalue { i64, i1 } [[TMP3661]], 1 +// CHECK-NEXT: [[TMP3665:%.*]] = zext i1 [[TMP3664]] to i64 +// CHECK-NEXT: store i64 [[TMP3665]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3666:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3667:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3666]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP3667]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3668:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3669:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3668]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP3669]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3670:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3671:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3670]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP3671]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3672:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3673:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3672]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP3673]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3674:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3675:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3676:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3674]], i64 [[TMP3675]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3677:%.*]] = 
extractvalue { i64, i1 } [[TMP3676]], 0 +// CHECK-NEXT: store i64 [[TMP3677]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3678:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3679:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3680:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3678]], i64 [[TMP3679]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3681:%.*]] = extractvalue { i64, i1 } [[TMP3680]], 0 +// CHECK-NEXT: store i64 [[TMP3681]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3682:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3683:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3682]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3684:%.*]] = icmp ugt i64 [[TMP3683]], [[TMP3682]] +// CHECK-NEXT: [[TMP3685:%.*]] = select i1 [[TMP3684]], i64 [[TMP3682]], i64 [[TMP3683]] +// CHECK-NEXT: store i64 [[TMP3685]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3686:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3687:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3686]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3688:%.*]] = icmp ult i64 [[TMP3687]], [[TMP3686]] +// CHECK-NEXT: [[TMP3689:%.*]] = select i1 [[TMP3688]], i64 [[TMP3686]], i64 [[TMP3687]] +// CHECK-NEXT: store i64 [[TMP3689]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3690:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3691:%.*]] = atomicrmw umin i64* [[ULX]], i64 [[TMP3690]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3692:%.*]] = icmp ult i64 [[TMP3691]], [[TMP3690]] +// CHECK-NEXT: [[TMP3693:%.*]] = select i1 [[TMP3692]], i64 [[TMP3690]], i64 [[TMP3691]] +// CHECK-NEXT: store i64 [[TMP3693]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3694:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3695:%.*]] = atomicrmw umax i64* [[ULX]], i64 [[TMP3694]] seq_cst, align 8 +// CHECK-NEXT: [[TMP3696:%.*]] = icmp ugt i64 [[TMP3695]], [[TMP3694]] +// CHECK-NEXT: [[TMP3697:%.*]] = select i1 [[TMP3696]], i64 [[TMP3694]], i64 [[TMP3695]] +// CHECK-NEXT: store i64 [[TMP3697]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3698:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3699:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3700:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3698]], i64 [[TMP3699]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3701:%.*]] = extractvalue { i64, i1 } [[TMP3700]], 0 +// CHECK-NEXT: [[TMP3702:%.*]] = extractvalue { i64, i1 } [[TMP3700]], 1 +// CHECK-NEXT: [[TMP3703:%.*]] = select i1 [[TMP3702]], i64 [[TMP3698]], i64 [[TMP3701]] +// CHECK-NEXT: store i64 [[TMP3703]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3704:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3705:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3706:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3704]], i64 [[TMP3705]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3707:%.*]] = extractvalue { i64, i1 } [[TMP3706]], 0 +// CHECK-NEXT: [[TMP3708:%.*]] = extractvalue { i64, i1 } [[TMP3706]], 1 +// CHECK-NEXT: [[TMP3709:%.*]] = select i1 [[TMP3708]], i64 [[TMP3704]], i64 [[TMP3707]] +// CHECK-NEXT: 
store i64 [[TMP3709]], i64* [[ULV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3710:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3711:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3712:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3710]], i64 [[TMP3711]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3713:%.*]] = extractvalue { i64, i1 } [[TMP3712]], 0 +// CHECK-NEXT: [[TMP3714:%.*]] = extractvalue { i64, i1 } [[TMP3712]], 1 +// CHECK-NEXT: br i1 [[TMP3714]], label [[ULX_ATOMIC_EXIT361:%.*]], label [[ULX_ATOMIC_CONT362:%.*]] +// CHECK: ulx.atomic.cont362: +// CHECK-NEXT: store i64 [[TMP3713]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT361]] +// CHECK: ulx.atomic.exit361: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3715:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3716:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3717:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3715]], i64 [[TMP3716]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3718:%.*]] = extractvalue { i64, i1 } [[TMP3717]], 0 +// CHECK-NEXT: [[TMP3719:%.*]] = extractvalue { i64, i1 } [[TMP3717]], 1 +// CHECK-NEXT: br i1 [[TMP3719]], label [[ULX_ATOMIC_EXIT363:%.*]], label [[ULX_ATOMIC_CONT364:%.*]] +// CHECK: ulx.atomic.cont364: +// CHECK-NEXT: store i64 [[TMP3718]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT363]] +// CHECK: ulx.atomic.exit363: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3720:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3721:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3722:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3720]], i64 [[TMP3721]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3723:%.*]] = extractvalue { i64, i1 } [[TMP3722]], 1 +// CHECK-NEXT: [[TMP3724:%.*]] = zext i1 [[TMP3723]] to i64 +// CHECK-NEXT: store i64 [[TMP3724]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3725:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3726:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3727:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3725]], i64 [[TMP3726]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3728:%.*]] = extractvalue { i64, i1 } [[TMP3727]], 1 +// CHECK-NEXT: [[TMP3729:%.*]] = zext i1 [[TMP3728]] to i64 +// CHECK-NEXT: store i64 [[TMP3729]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3730:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3731:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3732:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3730]], i64 [[TMP3731]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3733:%.*]] = extractvalue { i64, i1 } [[TMP3732]], 0 +// CHECK-NEXT: [[TMP3734:%.*]] = extractvalue { i64, i1 } [[TMP3732]], 1 +// CHECK-NEXT: br i1 [[TMP3734]], label [[ULX_ATOMIC_EXIT365:%.*]], label [[ULX_ATOMIC_CONT366:%.*]] +// CHECK: ulx.atomic.cont366: +// CHECK-NEXT: store i64 [[TMP3733]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT365]] +// CHECK: ulx.atomic.exit365: +// CHECK-NEXT: [[TMP3735:%.*]] = extractvalue { i64, i1 } [[TMP3732]], 1 +// CHECK-NEXT: [[TMP3736:%.*]] = zext i1 [[TMP3735]] to i64 +// CHECK-NEXT: store i64 [[TMP3736]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// 
CHECK-NEXT: [[TMP3737:%.*]] = load i64, i64* [[ULE]], align 8 +// CHECK-NEXT: [[TMP3738:%.*]] = load i64, i64* [[ULD]], align 8 +// CHECK-NEXT: [[TMP3739:%.*]] = cmpxchg i64* [[ULX]], i64 [[TMP3737]], i64 [[TMP3738]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP3740:%.*]] = extractvalue { i64, i1 } [[TMP3739]], 0 +// CHECK-NEXT: [[TMP3741:%.*]] = extractvalue { i64, i1 } [[TMP3739]], 1 +// CHECK-NEXT: br i1 [[TMP3741]], label [[ULX_ATOMIC_EXIT367:%.*]], label [[ULX_ATOMIC_CONT368:%.*]] +// CHECK: ulx.atomic.cont368: +// CHECK-NEXT: store i64 [[TMP3740]], i64* [[ULV]], align 8 +// CHECK-NEXT: br label [[ULX_ATOMIC_EXIT367]] +// CHECK: ulx.atomic.exit367: +// CHECK-NEXT: [[TMP3742:%.*]] = extractvalue { i64, i1 } [[TMP3739]], 1 +// CHECK-NEXT: [[TMP3743:%.*]] = zext i1 [[TMP3742]] to i64 +// CHECK-NEXT: store i64 [[TMP3743]], i64* [[ULR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3744:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3745:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3744]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3745]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3746:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3747:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3746]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3747]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3748:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3749:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3748]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3749]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3750:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3751:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3750]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3751]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3752:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3753:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3754:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3752]], i64 [[TMP3753]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3755:%.*]] = extractvalue { i64, i1 } [[TMP3754]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3755]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3756:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3757:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3758:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3756]], i64 [[TMP3757]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3759:%.*]] = extractvalue { i64, i1 } [[TMP3758]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3759]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3760:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3761:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3760]] monotonic, align 8 +// CHECK-NEXT: [[TMP3762:%.*]] = icmp ugt i64 [[TMP3761]], [[TMP3760]] +// CHECK-NEXT: [[TMP3763:%.*]] = select i1 [[TMP3762]], i64 [[TMP3760]], i64 [[TMP3761]] +// CHECK-NEXT: store volatile i64 [[TMP3763]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3764:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3765:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3764]] monotonic, align 8 +// CHECK-NEXT: [[TMP3766:%.*]] = icmp ult i64 [[TMP3765]], [[TMP3764]] +// CHECK-NEXT: [[TMP3767:%.*]] = select i1 [[TMP3766]], i64 [[TMP3764]], i64 [[TMP3765]] +// CHECK-NEXT: store volatile i64 [[TMP3767]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3768:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3769:%.*]] = atomicrmw 
umin i64* [[LLX]], i64 [[TMP3768]] monotonic, align 8 +// CHECK-NEXT: [[TMP3770:%.*]] = icmp ult i64 [[TMP3769]], [[TMP3768]] +// CHECK-NEXT: [[TMP3771:%.*]] = select i1 [[TMP3770]], i64 [[TMP3768]], i64 [[TMP3769]] +// CHECK-NEXT: store volatile i64 [[TMP3771]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3772:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3773:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3772]] monotonic, align 8 +// CHECK-NEXT: [[TMP3774:%.*]] = icmp ugt i64 [[TMP3773]], [[TMP3772]] +// CHECK-NEXT: [[TMP3775:%.*]] = select i1 [[TMP3774]], i64 [[TMP3772]], i64 [[TMP3773]] +// CHECK-NEXT: store volatile i64 [[TMP3775]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3776:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3777:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3778:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3776]], i64 [[TMP3777]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3779:%.*]] = extractvalue { i64, i1 } [[TMP3778]], 0 +// CHECK-NEXT: [[TMP3780:%.*]] = extractvalue { i64, i1 } [[TMP3778]], 1 +// CHECK-NEXT: [[TMP3781:%.*]] = select i1 [[TMP3780]], i64 [[TMP3776]], i64 [[TMP3779]] +// CHECK-NEXT: store volatile i64 [[TMP3781]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3782:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3783:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3784:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3782]], i64 [[TMP3783]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3785:%.*]] = extractvalue { i64, i1 } [[TMP3784]], 0 +// CHECK-NEXT: [[TMP3786:%.*]] = extractvalue { i64, i1 } [[TMP3784]], 1 +// CHECK-NEXT: [[TMP3787:%.*]] = select i1 [[TMP3786]], i64 [[TMP3782]], i64 [[TMP3785]] +// CHECK-NEXT: store volatile i64 [[TMP3787]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3788:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3789:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3790:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3788]], i64 [[TMP3789]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3791:%.*]] = extractvalue { i64, i1 } [[TMP3790]], 0 +// CHECK-NEXT: [[TMP3792:%.*]] = extractvalue { i64, i1 } [[TMP3790]], 1 +// CHECK-NEXT: br i1 [[TMP3792]], label [[LLX_ATOMIC_EXIT:%.*]], label [[LLX_ATOMIC_CONT:%.*]] +// CHECK: llx.atomic.cont: +// CHECK-NEXT: store i64 [[TMP3791]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT]] +// CHECK: llx.atomic.exit: +// CHECK-NEXT: [[TMP3793:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3794:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3795:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3793]], i64 [[TMP3794]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3796:%.*]] = extractvalue { i64, i1 } [[TMP3795]], 0 +// CHECK-NEXT: [[TMP3797:%.*]] = extractvalue { i64, i1 } [[TMP3795]], 1 +// CHECK-NEXT: br i1 [[TMP3797]], label [[LLX_ATOMIC_EXIT369:%.*]], label [[LLX_ATOMIC_CONT370:%.*]] +// CHECK: llx.atomic.cont370: +// CHECK-NEXT: store i64 [[TMP3796]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT369]] +// CHECK: llx.atomic.exit369: +// CHECK-NEXT: [[TMP3798:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3799:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3800:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3798]], i64 [[TMP3799]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3801:%.*]] = extractvalue { i64, i1 } [[TMP3800]], 1 +// CHECK-NEXT: [[TMP3802:%.*]] = zext i1 [[TMP3801]] to i64 +// CHECK-NEXT: store volatile i64 
[[TMP3802]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3803:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3804:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3805:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3803]], i64 [[TMP3804]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3806:%.*]] = extractvalue { i64, i1 } [[TMP3805]], 1 +// CHECK-NEXT: [[TMP3807:%.*]] = zext i1 [[TMP3806]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3807]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3808:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3809:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3810:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3808]], i64 [[TMP3809]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3811:%.*]] = extractvalue { i64, i1 } [[TMP3810]], 0 +// CHECK-NEXT: [[TMP3812:%.*]] = extractvalue { i64, i1 } [[TMP3810]], 1 +// CHECK-NEXT: br i1 [[TMP3812]], label [[LLX_ATOMIC_EXIT371:%.*]], label [[LLX_ATOMIC_CONT372:%.*]] +// CHECK: llx.atomic.cont372: +// CHECK-NEXT: store i64 [[TMP3811]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT371]] +// CHECK: llx.atomic.exit371: +// CHECK-NEXT: [[TMP3813:%.*]] = extractvalue { i64, i1 } [[TMP3810]], 1 +// CHECK-NEXT: [[TMP3814:%.*]] = zext i1 [[TMP3813]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3814]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3815:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3816:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3817:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3815]], i64 [[TMP3816]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3818:%.*]] = extractvalue { i64, i1 } [[TMP3817]], 0 +// CHECK-NEXT: [[TMP3819:%.*]] = extractvalue { i64, i1 } [[TMP3817]], 1 +// CHECK-NEXT: br i1 [[TMP3819]], label [[LLX_ATOMIC_EXIT373:%.*]], label [[LLX_ATOMIC_CONT374:%.*]] +// CHECK: llx.atomic.cont374: +// CHECK-NEXT: store i64 [[TMP3818]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT373]] +// CHECK: llx.atomic.exit373: +// CHECK-NEXT: [[TMP3820:%.*]] = extractvalue { i64, i1 } [[TMP3817]], 1 +// CHECK-NEXT: [[TMP3821:%.*]] = zext i1 [[TMP3820]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3821]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3822:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3823:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3822]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3823]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3824:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3825:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3824]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3825]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3826:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3827:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3826]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3827]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3828:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3829:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3828]] acq_rel, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3829]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3830:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3831:%.*]] = load 
i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3832:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3830]], i64 [[TMP3831]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3833:%.*]] = extractvalue { i64, i1 } [[TMP3832]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3833]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3834:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3835:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3836:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3834]], i64 [[TMP3835]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3837:%.*]] = extractvalue { i64, i1 } [[TMP3836]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3837]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3838:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3839:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3838]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3840:%.*]] = icmp ugt i64 [[TMP3839]], [[TMP3838]] +// CHECK-NEXT: [[TMP3841:%.*]] = select i1 [[TMP3840]], i64 [[TMP3838]], i64 [[TMP3839]] +// CHECK-NEXT: store volatile i64 [[TMP3841]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3842:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3843:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3842]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3844:%.*]] = icmp ult i64 [[TMP3843]], [[TMP3842]] +// CHECK-NEXT: [[TMP3845:%.*]] = select i1 [[TMP3844]], i64 [[TMP3842]], i64 [[TMP3843]] +// CHECK-NEXT: store volatile i64 [[TMP3845]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3846:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3847:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3846]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3848:%.*]] = icmp ult i64 [[TMP3847]], [[TMP3846]] +// CHECK-NEXT: [[TMP3849:%.*]] = select i1 [[TMP3848]], i64 [[TMP3846]], i64 [[TMP3847]] +// CHECK-NEXT: store volatile i64 [[TMP3849]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3850:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3851:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3850]] acq_rel, align 8 +// CHECK-NEXT: [[TMP3852:%.*]] = icmp ugt i64 [[TMP3851]], [[TMP3850]] +// CHECK-NEXT: [[TMP3853:%.*]] = select i1 [[TMP3852]], i64 [[TMP3850]], i64 [[TMP3851]] +// CHECK-NEXT: store volatile i64 [[TMP3853]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3854:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3855:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3856:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3854]], i64 [[TMP3855]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3857:%.*]] = extractvalue { i64, i1 } [[TMP3856]], 0 +// CHECK-NEXT: [[TMP3858:%.*]] = extractvalue { i64, i1 } [[TMP3856]], 1 +// CHECK-NEXT: [[TMP3859:%.*]] = select i1 [[TMP3858]], i64 [[TMP3854]], i64 [[TMP3857]] +// CHECK-NEXT: store volatile i64 [[TMP3859]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3860:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3861:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3862:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3860]], i64 [[TMP3861]] acq_rel acquire, align 8 +// CHECK-NEXT: 
[[TMP3863:%.*]] = extractvalue { i64, i1 } [[TMP3862]], 0 +// CHECK-NEXT: [[TMP3864:%.*]] = extractvalue { i64, i1 } [[TMP3862]], 1 +// CHECK-NEXT: [[TMP3865:%.*]] = select i1 [[TMP3864]], i64 [[TMP3860]], i64 [[TMP3863]] +// CHECK-NEXT: store volatile i64 [[TMP3865]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3866:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3867:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3868:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3866]], i64 [[TMP3867]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3869:%.*]] = extractvalue { i64, i1 } [[TMP3868]], 0 +// CHECK-NEXT: [[TMP3870:%.*]] = extractvalue { i64, i1 } [[TMP3868]], 1 +// CHECK-NEXT: br i1 [[TMP3870]], label [[LLX_ATOMIC_EXIT375:%.*]], label [[LLX_ATOMIC_CONT376:%.*]] +// CHECK: llx.atomic.cont376: +// CHECK-NEXT: store i64 [[TMP3869]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT375]] +// CHECK: llx.atomic.exit375: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3871:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3872:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3873:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3871]], i64 [[TMP3872]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3874:%.*]] = extractvalue { i64, i1 } [[TMP3873]], 0 +// CHECK-NEXT: [[TMP3875:%.*]] = extractvalue { i64, i1 } [[TMP3873]], 1 +// CHECK-NEXT: br i1 [[TMP3875]], label [[LLX_ATOMIC_EXIT377:%.*]], label [[LLX_ATOMIC_CONT378:%.*]] +// CHECK: llx.atomic.cont378: +// CHECK-NEXT: store i64 [[TMP3874]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT377]] +// CHECK: llx.atomic.exit377: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3876:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3877:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3878:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3876]], i64 [[TMP3877]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3879:%.*]] = extractvalue { i64, i1 } [[TMP3878]], 1 +// CHECK-NEXT: [[TMP3880:%.*]] = zext i1 [[TMP3879]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3880]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3881:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3882:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3883:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3881]], i64 [[TMP3882]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3884:%.*]] = extractvalue { i64, i1 } [[TMP3883]], 1 +// CHECK-NEXT: [[TMP3885:%.*]] = zext i1 [[TMP3884]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3885]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3886:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3887:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3888:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3886]], i64 [[TMP3887]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3889:%.*]] = extractvalue { i64, i1 } [[TMP3888]], 0 +// CHECK-NEXT: [[TMP3890:%.*]] = extractvalue { i64, i1 } [[TMP3888]], 1 +// CHECK-NEXT: br i1 [[TMP3890]], label [[LLX_ATOMIC_EXIT379:%.*]], label [[LLX_ATOMIC_CONT380:%.*]] +// CHECK: llx.atomic.cont380: +// CHECK-NEXT: store i64 [[TMP3889]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT379]] +// CHECK: llx.atomic.exit379: +// 
CHECK-NEXT: [[TMP3891:%.*]] = extractvalue { i64, i1 } [[TMP3888]], 1 +// CHECK-NEXT: [[TMP3892:%.*]] = zext i1 [[TMP3891]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3892]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3893:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3894:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3895:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3893]], i64 [[TMP3894]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP3896:%.*]] = extractvalue { i64, i1 } [[TMP3895]], 0 +// CHECK-NEXT: [[TMP3897:%.*]] = extractvalue { i64, i1 } [[TMP3895]], 1 +// CHECK-NEXT: br i1 [[TMP3897]], label [[LLX_ATOMIC_EXIT381:%.*]], label [[LLX_ATOMIC_CONT382:%.*]] +// CHECK: llx.atomic.cont382: +// CHECK-NEXT: store i64 [[TMP3896]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT381]] +// CHECK: llx.atomic.exit381: +// CHECK-NEXT: [[TMP3898:%.*]] = extractvalue { i64, i1 } [[TMP3895]], 1 +// CHECK-NEXT: [[TMP3899:%.*]] = zext i1 [[TMP3898]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3899]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP3900:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3901:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3900]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3901]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3902:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3903:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3902]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3903]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3904:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3905:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3904]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3905]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3906:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3907:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3906]] acquire, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3907]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3908:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3909:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3910:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3908]], i64 [[TMP3909]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3911:%.*]] = extractvalue { i64, i1 } [[TMP3910]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3911]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3912:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3913:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3914:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3912]], i64 [[TMP3913]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3915:%.*]] = extractvalue { i64, i1 } [[TMP3914]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3915]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3916:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3917:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3916]] acquire, align 8 +// CHECK-NEXT: [[TMP3918:%.*]] = icmp ugt i64 [[TMP3917]], [[TMP3916]] +// CHECK-NEXT: [[TMP3919:%.*]] = select i1 [[TMP3918]], i64 [[TMP3916]], i64 [[TMP3917]] +// CHECK-NEXT: store volatile i64 [[TMP3919]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3920:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3921:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3920]] acquire, align 8 +// CHECK-NEXT: [[TMP3922:%.*]] = icmp ult i64 [[TMP3921]], [[TMP3920]] +// 
CHECK-NEXT: [[TMP3923:%.*]] = select i1 [[TMP3922]], i64 [[TMP3920]], i64 [[TMP3921]] +// CHECK-NEXT: store volatile i64 [[TMP3923]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3924:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3925:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3924]] acquire, align 8 +// CHECK-NEXT: [[TMP3926:%.*]] = icmp ult i64 [[TMP3925]], [[TMP3924]] +// CHECK-NEXT: [[TMP3927:%.*]] = select i1 [[TMP3926]], i64 [[TMP3924]], i64 [[TMP3925]] +// CHECK-NEXT: store volatile i64 [[TMP3927]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3928:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3929:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3928]] acquire, align 8 +// CHECK-NEXT: [[TMP3930:%.*]] = icmp ugt i64 [[TMP3929]], [[TMP3928]] +// CHECK-NEXT: [[TMP3931:%.*]] = select i1 [[TMP3930]], i64 [[TMP3928]], i64 [[TMP3929]] +// CHECK-NEXT: store volatile i64 [[TMP3931]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3932:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3933:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3934:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3932]], i64 [[TMP3933]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3935:%.*]] = extractvalue { i64, i1 } [[TMP3934]], 0 +// CHECK-NEXT: [[TMP3936:%.*]] = extractvalue { i64, i1 } [[TMP3934]], 1 +// CHECK-NEXT: [[TMP3937:%.*]] = select i1 [[TMP3936]], i64 [[TMP3932]], i64 [[TMP3935]] +// CHECK-NEXT: store volatile i64 [[TMP3937]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3938:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3939:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3940:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3938]], i64 [[TMP3939]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3941:%.*]] = extractvalue { i64, i1 } [[TMP3940]], 0 +// CHECK-NEXT: [[TMP3942:%.*]] = extractvalue { i64, i1 } [[TMP3940]], 1 +// CHECK-NEXT: [[TMP3943:%.*]] = select i1 [[TMP3942]], i64 [[TMP3938]], i64 [[TMP3941]] +// CHECK-NEXT: store volatile i64 [[TMP3943]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3944:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3945:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3946:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3944]], i64 [[TMP3945]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3947:%.*]] = extractvalue { i64, i1 } [[TMP3946]], 0 +// CHECK-NEXT: [[TMP3948:%.*]] = extractvalue { i64, i1 } [[TMP3946]], 1 +// CHECK-NEXT: br i1 [[TMP3948]], label [[LLX_ATOMIC_EXIT383:%.*]], label [[LLX_ATOMIC_CONT384:%.*]] +// CHECK: llx.atomic.cont384: +// CHECK-NEXT: store i64 [[TMP3947]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT383]] +// CHECK: llx.atomic.exit383: +// CHECK-NEXT: [[TMP3949:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3950:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3951:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3949]], i64 [[TMP3950]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3952:%.*]] = extractvalue { i64, i1 } [[TMP3951]], 0 +// CHECK-NEXT: [[TMP3953:%.*]] = extractvalue { i64, i1 } [[TMP3951]], 1 +// CHECK-NEXT: br i1 [[TMP3953]], label [[LLX_ATOMIC_EXIT385:%.*]], label [[LLX_ATOMIC_CONT386:%.*]] +// CHECK: llx.atomic.cont386: +// CHECK-NEXT: store i64 [[TMP3952]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT385]] +// CHECK: llx.atomic.exit385: +// CHECK-NEXT: [[TMP3954:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3955:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3956:%.*]] = 
cmpxchg i64* [[LLX]], i64 [[TMP3954]], i64 [[TMP3955]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3957:%.*]] = extractvalue { i64, i1 } [[TMP3956]], 1 +// CHECK-NEXT: [[TMP3958:%.*]] = zext i1 [[TMP3957]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3958]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3959:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3960:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3961:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3959]], i64 [[TMP3960]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3962:%.*]] = extractvalue { i64, i1 } [[TMP3961]], 1 +// CHECK-NEXT: [[TMP3963:%.*]] = zext i1 [[TMP3962]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3963]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3964:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3965:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3966:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3964]], i64 [[TMP3965]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3967:%.*]] = extractvalue { i64, i1 } [[TMP3966]], 0 +// CHECK-NEXT: [[TMP3968:%.*]] = extractvalue { i64, i1 } [[TMP3966]], 1 +// CHECK-NEXT: br i1 [[TMP3968]], label [[LLX_ATOMIC_EXIT387:%.*]], label [[LLX_ATOMIC_CONT388:%.*]] +// CHECK: llx.atomic.cont388: +// CHECK-NEXT: store i64 [[TMP3967]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT387]] +// CHECK: llx.atomic.exit387: +// CHECK-NEXT: [[TMP3969:%.*]] = extractvalue { i64, i1 } [[TMP3966]], 1 +// CHECK-NEXT: [[TMP3970:%.*]] = zext i1 [[TMP3969]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3970]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3971:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3972:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3973:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3971]], i64 [[TMP3972]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP3974:%.*]] = extractvalue { i64, i1 } [[TMP3973]], 0 +// CHECK-NEXT: [[TMP3975:%.*]] = extractvalue { i64, i1 } [[TMP3973]], 1 +// CHECK-NEXT: br i1 [[TMP3975]], label [[LLX_ATOMIC_EXIT389:%.*]], label [[LLX_ATOMIC_CONT390:%.*]] +// CHECK: llx.atomic.cont390: +// CHECK-NEXT: store i64 [[TMP3974]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT389]] +// CHECK: llx.atomic.exit389: +// CHECK-NEXT: [[TMP3976:%.*]] = extractvalue { i64, i1 } [[TMP3973]], 1 +// CHECK-NEXT: [[TMP3977:%.*]] = zext i1 [[TMP3976]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP3977]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP3978:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3979:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3978]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3979]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3980:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3981:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3980]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3981]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3982:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3983:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3982]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3983]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3984:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3985:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3984]] monotonic, align 8 +// CHECK-NEXT: store volatile i64 [[TMP3985]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3986:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3987:%.*]] = load i64, i64* [[LLD]], align 8 +// 
CHECK-NEXT: [[TMP3988:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3986]], i64 [[TMP3987]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3989:%.*]] = extractvalue { i64, i1 } [[TMP3988]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3989]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3990:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3991:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP3992:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP3990]], i64 [[TMP3991]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP3993:%.*]] = extractvalue { i64, i1 } [[TMP3992]], 0 +// CHECK-NEXT: store volatile i64 [[TMP3993]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3994:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3995:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP3994]] monotonic, align 8 +// CHECK-NEXT: [[TMP3996:%.*]] = icmp ugt i64 [[TMP3995]], [[TMP3994]] +// CHECK-NEXT: [[TMP3997:%.*]] = select i1 [[TMP3996]], i64 [[TMP3994]], i64 [[TMP3995]] +// CHECK-NEXT: store volatile i64 [[TMP3997]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP3998:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP3999:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP3998]] monotonic, align 8 +// CHECK-NEXT: [[TMP4000:%.*]] = icmp ult i64 [[TMP3999]], [[TMP3998]] +// CHECK-NEXT: [[TMP4001:%.*]] = select i1 [[TMP4000]], i64 [[TMP3998]], i64 [[TMP3999]] +// CHECK-NEXT: store volatile i64 [[TMP4001]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP4002:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4003:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4002]] monotonic, align 8 +// CHECK-NEXT: [[TMP4004:%.*]] = icmp ult i64 [[TMP4003]], [[TMP4002]] +// CHECK-NEXT: [[TMP4005:%.*]] = select i1 [[TMP4004]], i64 [[TMP4002]], i64 [[TMP4003]] +// CHECK-NEXT: store volatile i64 [[TMP4005]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP4006:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4007:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4006]] monotonic, align 8 +// CHECK-NEXT: [[TMP4008:%.*]] = icmp ugt i64 [[TMP4007]], [[TMP4006]] +// CHECK-NEXT: [[TMP4009:%.*]] = select i1 [[TMP4008]], i64 [[TMP4006]], i64 [[TMP4007]] +// CHECK-NEXT: store volatile i64 [[TMP4009]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP4010:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4011:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4012:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4010]], i64 [[TMP4011]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4013:%.*]] = extractvalue { i64, i1 } [[TMP4012]], 0 +// CHECK-NEXT: [[TMP4014:%.*]] = extractvalue { i64, i1 } [[TMP4012]], 1 +// CHECK-NEXT: [[TMP4015:%.*]] = select i1 [[TMP4014]], i64 [[TMP4010]], i64 [[TMP4013]] +// CHECK-NEXT: store volatile i64 [[TMP4015]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP4016:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4017:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4018:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4016]], i64 [[TMP4017]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4019:%.*]] = extractvalue { i64, i1 } [[TMP4018]], 0 +// CHECK-NEXT: [[TMP4020:%.*]] = extractvalue { i64, i1 } [[TMP4018]], 1 +// CHECK-NEXT: [[TMP4021:%.*]] = select i1 [[TMP4020]], i64 [[TMP4016]], i64 [[TMP4019]] +// CHECK-NEXT: store volatile i64 [[TMP4021]], i64* [[LLV]], align 8 +// CHECK-NEXT: [[TMP4022:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4023:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4024:%.*]] = cmpxchg i64* [[LLX]], i64 
[[TMP4022]], i64 [[TMP4023]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4025:%.*]] = extractvalue { i64, i1 } [[TMP4024]], 0 +// CHECK-NEXT: [[TMP4026:%.*]] = extractvalue { i64, i1 } [[TMP4024]], 1 +// CHECK-NEXT: br i1 [[TMP4026]], label [[LLX_ATOMIC_EXIT391:%.*]], label [[LLX_ATOMIC_CONT392:%.*]] +// CHECK: llx.atomic.cont392: +// CHECK-NEXT: store i64 [[TMP4025]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT391]] +// CHECK: llx.atomic.exit391: +// CHECK-NEXT: [[TMP4027:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4028:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4029:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4027]], i64 [[TMP4028]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4030:%.*]] = extractvalue { i64, i1 } [[TMP4029]], 0 +// CHECK-NEXT: [[TMP4031:%.*]] = extractvalue { i64, i1 } [[TMP4029]], 1 +// CHECK-NEXT: br i1 [[TMP4031]], label [[LLX_ATOMIC_EXIT393:%.*]], label [[LLX_ATOMIC_CONT394:%.*]] +// CHECK: llx.atomic.cont394: +// CHECK-NEXT: store i64 [[TMP4030]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT393]] +// CHECK: llx.atomic.exit393: +// CHECK-NEXT: [[TMP4032:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4033:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4034:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4032]], i64 [[TMP4033]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4035:%.*]] = extractvalue { i64, i1 } [[TMP4034]], 1 +// CHECK-NEXT: [[TMP4036:%.*]] = zext i1 [[TMP4035]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4036]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP4037:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4038:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4039:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4037]], i64 [[TMP4038]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4040:%.*]] = extractvalue { i64, i1 } [[TMP4039]], 1 +// CHECK-NEXT: [[TMP4041:%.*]] = zext i1 [[TMP4040]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4041]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP4042:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4043:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4044:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4042]], i64 [[TMP4043]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4045:%.*]] = extractvalue { i64, i1 } [[TMP4044]], 0 +// CHECK-NEXT: [[TMP4046:%.*]] = extractvalue { i64, i1 } [[TMP4044]], 1 +// CHECK-NEXT: br i1 [[TMP4046]], label [[LLX_ATOMIC_EXIT395:%.*]], label [[LLX_ATOMIC_CONT396:%.*]] +// CHECK: llx.atomic.cont396: +// CHECK-NEXT: store i64 [[TMP4045]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT395]] +// CHECK: llx.atomic.exit395: +// CHECK-NEXT: [[TMP4047:%.*]] = extractvalue { i64, i1 } [[TMP4044]], 1 +// CHECK-NEXT: [[TMP4048:%.*]] = zext i1 [[TMP4047]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4048]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP4049:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4050:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4051:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4049]], i64 [[TMP4050]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4052:%.*]] = extractvalue { i64, i1 } [[TMP4051]], 0 +// CHECK-NEXT: [[TMP4053:%.*]] = extractvalue { i64, i1 } [[TMP4051]], 1 +// CHECK-NEXT: br i1 [[TMP4053]], label [[LLX_ATOMIC_EXIT397:%.*]], label [[LLX_ATOMIC_CONT398:%.*]] +// CHECK: llx.atomic.cont398: +// CHECK-NEXT: store i64 [[TMP4052]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label 
[[LLX_ATOMIC_EXIT397]] +// CHECK: llx.atomic.exit397: +// CHECK-NEXT: [[TMP4054:%.*]] = extractvalue { i64, i1 } [[TMP4051]], 1 +// CHECK-NEXT: [[TMP4055:%.*]] = zext i1 [[TMP4054]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4055]], i64* [[LLR]], align 8 +// CHECK-NEXT: [[TMP4056:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4057:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4056]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP4057]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4058:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4059:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4058]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP4059]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4060:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4061:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4060]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP4061]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4062:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4063:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4062]] release, align 8 +// CHECK-NEXT: store volatile i64 [[TMP4063]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4064:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4065:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4066:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4064]], i64 [[TMP4065]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4067:%.*]] = extractvalue { i64, i1 } [[TMP4066]], 0 +// CHECK-NEXT: store volatile i64 [[TMP4067]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4068:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4069:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4070:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4068]], i64 [[TMP4069]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4071:%.*]] = extractvalue { i64, i1 } [[TMP4070]], 0 +// CHECK-NEXT: store volatile i64 [[TMP4071]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4072:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4073:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4072]] release, align 8 +// CHECK-NEXT: [[TMP4074:%.*]] = icmp ugt i64 [[TMP4073]], [[TMP4072]] +// CHECK-NEXT: [[TMP4075:%.*]] = select i1 [[TMP4074]], i64 [[TMP4072]], i64 [[TMP4073]] +// CHECK-NEXT: store volatile i64 [[TMP4075]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4076:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4077:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4076]] release, align 8 +// CHECK-NEXT: [[TMP4078:%.*]] = icmp ult i64 [[TMP4077]], [[TMP4076]] +// CHECK-NEXT: [[TMP4079:%.*]] = select i1 [[TMP4078]], i64 [[TMP4076]], i64 [[TMP4077]] +// CHECK-NEXT: store volatile i64 [[TMP4079]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4080:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4081:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4080]] release, align 8 +// CHECK-NEXT: [[TMP4082:%.*]] = icmp ult i64 [[TMP4081]], [[TMP4080]] +// 
CHECK-NEXT: [[TMP4083:%.*]] = select i1 [[TMP4082]], i64 [[TMP4080]], i64 [[TMP4081]] +// CHECK-NEXT: store volatile i64 [[TMP4083]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4084:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4085:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4084]] release, align 8 +// CHECK-NEXT: [[TMP4086:%.*]] = icmp ugt i64 [[TMP4085]], [[TMP4084]] +// CHECK-NEXT: [[TMP4087:%.*]] = select i1 [[TMP4086]], i64 [[TMP4084]], i64 [[TMP4085]] +// CHECK-NEXT: store volatile i64 [[TMP4087]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4088:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4089:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4090:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4088]], i64 [[TMP4089]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4091:%.*]] = extractvalue { i64, i1 } [[TMP4090]], 0 +// CHECK-NEXT: [[TMP4092:%.*]] = extractvalue { i64, i1 } [[TMP4090]], 1 +// CHECK-NEXT: [[TMP4093:%.*]] = select i1 [[TMP4092]], i64 [[TMP4088]], i64 [[TMP4091]] +// CHECK-NEXT: store volatile i64 [[TMP4093]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4094:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4095:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4096:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4094]], i64 [[TMP4095]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4097:%.*]] = extractvalue { i64, i1 } [[TMP4096]], 0 +// CHECK-NEXT: [[TMP4098:%.*]] = extractvalue { i64, i1 } [[TMP4096]], 1 +// CHECK-NEXT: [[TMP4099:%.*]] = select i1 [[TMP4098]], i64 [[TMP4094]], i64 [[TMP4097]] +// CHECK-NEXT: store volatile i64 [[TMP4099]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4100:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4101:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4102:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4100]], i64 [[TMP4101]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4103:%.*]] = extractvalue { i64, i1 } [[TMP4102]], 0 +// CHECK-NEXT: [[TMP4104:%.*]] = extractvalue { i64, i1 } [[TMP4102]], 1 +// CHECK-NEXT: br i1 [[TMP4104]], label [[LLX_ATOMIC_EXIT399:%.*]], label [[LLX_ATOMIC_CONT400:%.*]] +// CHECK: llx.atomic.cont400: +// CHECK-NEXT: store i64 [[TMP4103]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT399]] +// CHECK: llx.atomic.exit399: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4105:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4106:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4107:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4105]], i64 [[TMP4106]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4108:%.*]] = extractvalue { i64, i1 } [[TMP4107]], 0 +// CHECK-NEXT: [[TMP4109:%.*]] = extractvalue { i64, i1 } [[TMP4107]], 1 +// CHECK-NEXT: br i1 [[TMP4109]], label [[LLX_ATOMIC_EXIT401:%.*]], label [[LLX_ATOMIC_CONT402:%.*]] +// CHECK: llx.atomic.cont402: +// CHECK-NEXT: store i64 [[TMP4108]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT401]] +// CHECK: llx.atomic.exit401: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4110:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4111:%.*]] = load i64, i64* [[LLD]], align 8 +// 
CHECK-NEXT: [[TMP4112:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4110]], i64 [[TMP4111]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4113:%.*]] = extractvalue { i64, i1 } [[TMP4112]], 1 +// CHECK-NEXT: [[TMP4114:%.*]] = zext i1 [[TMP4113]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4114]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4115:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4116:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4117:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4115]], i64 [[TMP4116]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4118:%.*]] = extractvalue { i64, i1 } [[TMP4117]], 1 +// CHECK-NEXT: [[TMP4119:%.*]] = zext i1 [[TMP4118]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4119]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4120:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4121:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4122:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4120]], i64 [[TMP4121]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4123:%.*]] = extractvalue { i64, i1 } [[TMP4122]], 0 +// CHECK-NEXT: [[TMP4124:%.*]] = extractvalue { i64, i1 } [[TMP4122]], 1 +// CHECK-NEXT: br i1 [[TMP4124]], label [[LLX_ATOMIC_EXIT403:%.*]], label [[LLX_ATOMIC_CONT404:%.*]] +// CHECK: llx.atomic.cont404: +// CHECK-NEXT: store i64 [[TMP4123]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT403]] +// CHECK: llx.atomic.exit403: +// CHECK-NEXT: [[TMP4125:%.*]] = extractvalue { i64, i1 } [[TMP4122]], 1 +// CHECK-NEXT: [[TMP4126:%.*]] = zext i1 [[TMP4125]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4126]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4127:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4128:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4129:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4127]], i64 [[TMP4128]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4130:%.*]] = extractvalue { i64, i1 } [[TMP4129]], 0 +// CHECK-NEXT: [[TMP4131:%.*]] = extractvalue { i64, i1 } [[TMP4129]], 1 +// CHECK-NEXT: br i1 [[TMP4131]], label [[LLX_ATOMIC_EXIT405:%.*]], label [[LLX_ATOMIC_CONT406:%.*]] +// CHECK: llx.atomic.cont406: +// CHECK-NEXT: store i64 [[TMP4130]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT405]] +// CHECK: llx.atomic.exit405: +// CHECK-NEXT: [[TMP4132:%.*]] = extractvalue { i64, i1 } [[TMP4129]], 1 +// CHECK-NEXT: [[TMP4133:%.*]] = zext i1 [[TMP4132]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4133]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4134:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4135:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4134]] seq_cst, align 8 +// CHECK-NEXT: store volatile i64 [[TMP4135]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4136:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4137:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4136]] seq_cst, align 8 +// CHECK-NEXT: store volatile i64 [[TMP4137]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4138:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4139:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4138]] seq_cst, 
align 8 +// CHECK-NEXT: store volatile i64 [[TMP4139]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4140:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4141:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4140]] seq_cst, align 8 +// CHECK-NEXT: store volatile i64 [[TMP4141]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4142:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4143:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4144:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4142]], i64 [[TMP4143]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4145:%.*]] = extractvalue { i64, i1 } [[TMP4144]], 0 +// CHECK-NEXT: store volatile i64 [[TMP4145]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4146:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4147:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4148:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4146]], i64 [[TMP4147]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4149:%.*]] = extractvalue { i64, i1 } [[TMP4148]], 0 +// CHECK-NEXT: store volatile i64 [[TMP4149]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4150:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4151:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4150]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4152:%.*]] = icmp ugt i64 [[TMP4151]], [[TMP4150]] +// CHECK-NEXT: [[TMP4153:%.*]] = select i1 [[TMP4152]], i64 [[TMP4150]], i64 [[TMP4151]] +// CHECK-NEXT: store volatile i64 [[TMP4153]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4154:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4155:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4154]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4156:%.*]] = icmp ult i64 [[TMP4155]], [[TMP4154]] +// CHECK-NEXT: [[TMP4157:%.*]] = select i1 [[TMP4156]], i64 [[TMP4154]], i64 [[TMP4155]] +// CHECK-NEXT: store volatile i64 [[TMP4157]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4158:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4159:%.*]] = atomicrmw umin i64* [[LLX]], i64 [[TMP4158]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4160:%.*]] = icmp ult i64 [[TMP4159]], [[TMP4158]] +// CHECK-NEXT: [[TMP4161:%.*]] = select i1 [[TMP4160]], i64 [[TMP4158]], i64 [[TMP4159]] +// CHECK-NEXT: store volatile i64 [[TMP4161]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4162:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4163:%.*]] = atomicrmw umax i64* [[LLX]], i64 [[TMP4162]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4164:%.*]] = icmp ugt i64 [[TMP4163]], [[TMP4162]] +// CHECK-NEXT: [[TMP4165:%.*]] = select i1 [[TMP4164]], i64 [[TMP4162]], i64 [[TMP4163]] +// CHECK-NEXT: store volatile i64 [[TMP4165]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4166:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4167:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4168:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4166]], i64 [[TMP4167]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4169:%.*]] = extractvalue { i64, i1 } [[TMP4168]], 0 +// CHECK-NEXT: 
[[TMP4170:%.*]] = extractvalue { i64, i1 } [[TMP4168]], 1 +// CHECK-NEXT: [[TMP4171:%.*]] = select i1 [[TMP4170]], i64 [[TMP4166]], i64 [[TMP4169]] +// CHECK-NEXT: store volatile i64 [[TMP4171]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4172:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4173:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4174:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4172]], i64 [[TMP4173]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4175:%.*]] = extractvalue { i64, i1 } [[TMP4174]], 0 +// CHECK-NEXT: [[TMP4176:%.*]] = extractvalue { i64, i1 } [[TMP4174]], 1 +// CHECK-NEXT: [[TMP4177:%.*]] = select i1 [[TMP4176]], i64 [[TMP4172]], i64 [[TMP4175]] +// CHECK-NEXT: store volatile i64 [[TMP4177]], i64* [[LLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4178:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4179:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4180:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4178]], i64 [[TMP4179]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4181:%.*]] = extractvalue { i64, i1 } [[TMP4180]], 0 +// CHECK-NEXT: [[TMP4182:%.*]] = extractvalue { i64, i1 } [[TMP4180]], 1 +// CHECK-NEXT: br i1 [[TMP4182]], label [[LLX_ATOMIC_EXIT407:%.*]], label [[LLX_ATOMIC_CONT408:%.*]] +// CHECK: llx.atomic.cont408: +// CHECK-NEXT: store i64 [[TMP4181]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT407]] +// CHECK: llx.atomic.exit407: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4183:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4184:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4185:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4183]], i64 [[TMP4184]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4186:%.*]] = extractvalue { i64, i1 } [[TMP4185]], 0 +// CHECK-NEXT: [[TMP4187:%.*]] = extractvalue { i64, i1 } [[TMP4185]], 1 +// CHECK-NEXT: br i1 [[TMP4187]], label [[LLX_ATOMIC_EXIT409:%.*]], label [[LLX_ATOMIC_CONT410:%.*]] +// CHECK: llx.atomic.cont410: +// CHECK-NEXT: store i64 [[TMP4186]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT409]] +// CHECK: llx.atomic.exit409: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4188:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4189:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4190:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4188]], i64 [[TMP4189]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4191:%.*]] = extractvalue { i64, i1 } [[TMP4190]], 1 +// CHECK-NEXT: [[TMP4192:%.*]] = zext i1 [[TMP4191]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4192]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4193:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4194:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4195:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4193]], i64 [[TMP4194]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4196:%.*]] = extractvalue { i64, i1 } [[TMP4195]], 1 +// CHECK-NEXT: [[TMP4197:%.*]] = zext i1 [[TMP4196]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4197]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4198:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4199:%.*]] = load i64, i64* [[LLD]], align 8 
+// CHECK-NEXT: [[TMP4200:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4198]], i64 [[TMP4199]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4201:%.*]] = extractvalue { i64, i1 } [[TMP4200]], 0 +// CHECK-NEXT: [[TMP4202:%.*]] = extractvalue { i64, i1 } [[TMP4200]], 1 +// CHECK-NEXT: br i1 [[TMP4202]], label [[LLX_ATOMIC_EXIT411:%.*]], label [[LLX_ATOMIC_CONT412:%.*]] +// CHECK: llx.atomic.cont412: +// CHECK-NEXT: store i64 [[TMP4201]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT411]] +// CHECK: llx.atomic.exit411: +// CHECK-NEXT: [[TMP4203:%.*]] = extractvalue { i64, i1 } [[TMP4200]], 1 +// CHECK-NEXT: [[TMP4204:%.*]] = zext i1 [[TMP4203]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4204]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4205:%.*]] = load i64, i64* [[LLE]], align 8 +// CHECK-NEXT: [[TMP4206:%.*]] = load i64, i64* [[LLD]], align 8 +// CHECK-NEXT: [[TMP4207:%.*]] = cmpxchg i64* [[LLX]], i64 [[TMP4205]], i64 [[TMP4206]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4208:%.*]] = extractvalue { i64, i1 } [[TMP4207]], 0 +// CHECK-NEXT: [[TMP4209:%.*]] = extractvalue { i64, i1 } [[TMP4207]], 1 +// CHECK-NEXT: br i1 [[TMP4209]], label [[LLX_ATOMIC_EXIT413:%.*]], label [[LLX_ATOMIC_CONT414:%.*]] +// CHECK: llx.atomic.cont414: +// CHECK-NEXT: store i64 [[TMP4208]], i64* [[LLV]], align 8 +// CHECK-NEXT: br label [[LLX_ATOMIC_EXIT413]] +// CHECK: llx.atomic.exit413: +// CHECK-NEXT: [[TMP4210:%.*]] = extractvalue { i64, i1 } [[TMP4207]], 1 +// CHECK-NEXT: [[TMP4211:%.*]] = zext i1 [[TMP4210]] to i64 +// CHECK-NEXT: store volatile i64 [[TMP4211]], i64* [[LLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4212:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4213:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4212]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP4213]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4214:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4215:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4214]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP4215]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4216:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4217:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4216]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP4217]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4218:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4219:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4218]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP4219]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4220:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4221:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4222:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4220]], i64 [[TMP4221]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4223:%.*]] = extractvalue { i64, i1 } [[TMP4222]], 0 +// CHECK-NEXT: store i64 [[TMP4223]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4224:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4225:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4226:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4224]], i64 [[TMP4225]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4227:%.*]] = extractvalue { i64, i1 } [[TMP4226]], 0 +// CHECK-NEXT: store i64 [[TMP4227]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4228:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4229:%.*]] = atomicrmw 
umax i64* [[ULLX]], i64 [[TMP4228]] monotonic, align 8 +// CHECK-NEXT: [[TMP4230:%.*]] = icmp ugt i64 [[TMP4229]], [[TMP4228]] +// CHECK-NEXT: [[TMP4231:%.*]] = select i1 [[TMP4230]], i64 [[TMP4228]], i64 [[TMP4229]] +// CHECK-NEXT: store i64 [[TMP4231]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4232:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4233:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4232]] monotonic, align 8 +// CHECK-NEXT: [[TMP4234:%.*]] = icmp ult i64 [[TMP4233]], [[TMP4232]] +// CHECK-NEXT: [[TMP4235:%.*]] = select i1 [[TMP4234]], i64 [[TMP4232]], i64 [[TMP4233]] +// CHECK-NEXT: store i64 [[TMP4235]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4236:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4237:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4236]] monotonic, align 8 +// CHECK-NEXT: [[TMP4238:%.*]] = icmp ult i64 [[TMP4237]], [[TMP4236]] +// CHECK-NEXT: [[TMP4239:%.*]] = select i1 [[TMP4238]], i64 [[TMP4236]], i64 [[TMP4237]] +// CHECK-NEXT: store i64 [[TMP4239]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4240:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4241:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4240]] monotonic, align 8 +// CHECK-NEXT: [[TMP4242:%.*]] = icmp ugt i64 [[TMP4241]], [[TMP4240]] +// CHECK-NEXT: [[TMP4243:%.*]] = select i1 [[TMP4242]], i64 [[TMP4240]], i64 [[TMP4241]] +// CHECK-NEXT: store i64 [[TMP4243]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4244:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4245:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4246:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4244]], i64 [[TMP4245]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4247:%.*]] = extractvalue { i64, i1 } [[TMP4246]], 0 +// CHECK-NEXT: [[TMP4248:%.*]] = extractvalue { i64, i1 } [[TMP4246]], 1 +// CHECK-NEXT: [[TMP4249:%.*]] = select i1 [[TMP4248]], i64 [[TMP4244]], i64 [[TMP4247]] +// CHECK-NEXT: store i64 [[TMP4249]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4250:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4251:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4252:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4250]], i64 [[TMP4251]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4253:%.*]] = extractvalue { i64, i1 } [[TMP4252]], 0 +// CHECK-NEXT: [[TMP4254:%.*]] = extractvalue { i64, i1 } [[TMP4252]], 1 +// CHECK-NEXT: [[TMP4255:%.*]] = select i1 [[TMP4254]], i64 [[TMP4250]], i64 [[TMP4253]] +// CHECK-NEXT: store i64 [[TMP4255]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4256:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4257:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4258:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4256]], i64 [[TMP4257]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4259:%.*]] = extractvalue { i64, i1 } [[TMP4258]], 0 +// CHECK-NEXT: [[TMP4260:%.*]] = extractvalue { i64, i1 } [[TMP4258]], 1 +// CHECK-NEXT: br i1 [[TMP4260]], label [[ULLX_ATOMIC_EXIT:%.*]], label [[ULLX_ATOMIC_CONT:%.*]] +// CHECK: ullx.atomic.cont: +// CHECK-NEXT: store i64 [[TMP4259]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT]] +// CHECK: ullx.atomic.exit: +// CHECK-NEXT: [[TMP4261:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4262:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4263:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4261]], i64 [[TMP4262]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4264:%.*]] = extractvalue { i64, i1 } [[TMP4263]], 0 +// 
CHECK-NEXT: [[TMP4265:%.*]] = extractvalue { i64, i1 } [[TMP4263]], 1 +// CHECK-NEXT: br i1 [[TMP4265]], label [[ULLX_ATOMIC_EXIT415:%.*]], label [[ULLX_ATOMIC_CONT416:%.*]] +// CHECK: ullx.atomic.cont416: +// CHECK-NEXT: store i64 [[TMP4264]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT415]] +// CHECK: ullx.atomic.exit415: +// CHECK-NEXT: [[TMP4266:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4267:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4268:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4266]], i64 [[TMP4267]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4269:%.*]] = extractvalue { i64, i1 } [[TMP4268]], 1 +// CHECK-NEXT: [[TMP4270:%.*]] = zext i1 [[TMP4269]] to i64 +// CHECK-NEXT: store i64 [[TMP4270]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4271:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4272:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4273:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4271]], i64 [[TMP4272]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4274:%.*]] = extractvalue { i64, i1 } [[TMP4273]], 1 +// CHECK-NEXT: [[TMP4275:%.*]] = zext i1 [[TMP4274]] to i64 +// CHECK-NEXT: store i64 [[TMP4275]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4276:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4277:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4278:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4276]], i64 [[TMP4277]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4279:%.*]] = extractvalue { i64, i1 } [[TMP4278]], 0 +// CHECK-NEXT: [[TMP4280:%.*]] = extractvalue { i64, i1 } [[TMP4278]], 1 +// CHECK-NEXT: br i1 [[TMP4280]], label [[ULLX_ATOMIC_EXIT417:%.*]], label [[ULLX_ATOMIC_CONT418:%.*]] +// CHECK: ullx.atomic.cont418: +// CHECK-NEXT: store i64 [[TMP4279]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT417]] +// CHECK: ullx.atomic.exit417: +// CHECK-NEXT: [[TMP4281:%.*]] = extractvalue { i64, i1 } [[TMP4278]], 1 +// CHECK-NEXT: [[TMP4282:%.*]] = zext i1 [[TMP4281]] to i64 +// CHECK-NEXT: store i64 [[TMP4282]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4283:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4284:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4285:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4283]], i64 [[TMP4284]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4286:%.*]] = extractvalue { i64, i1 } [[TMP4285]], 0 +// CHECK-NEXT: [[TMP4287:%.*]] = extractvalue { i64, i1 } [[TMP4285]], 1 +// CHECK-NEXT: br i1 [[TMP4287]], label [[ULLX_ATOMIC_EXIT419:%.*]], label [[ULLX_ATOMIC_CONT420:%.*]] +// CHECK: ullx.atomic.cont420: +// CHECK-NEXT: store i64 [[TMP4286]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT419]] +// CHECK: ullx.atomic.exit419: +// CHECK-NEXT: [[TMP4288:%.*]] = extractvalue { i64, i1 } [[TMP4285]], 1 +// CHECK-NEXT: [[TMP4289:%.*]] = zext i1 [[TMP4288]] to i64 +// CHECK-NEXT: store i64 [[TMP4289]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4290:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4291:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4290]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP4291]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4292:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4293:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4292]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP4293]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4294:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4295:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4294]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP4295]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4296:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4297:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4296]] acq_rel, align 8 +// CHECK-NEXT: store i64 [[TMP4297]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4298:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4299:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4300:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4298]], i64 [[TMP4299]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4301:%.*]] = extractvalue { i64, i1 } [[TMP4300]], 0 +// CHECK-NEXT: store i64 [[TMP4301]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4302:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4303:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4304:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4302]], i64 [[TMP4303]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4305:%.*]] = extractvalue { i64, i1 } [[TMP4304]], 0 +// CHECK-NEXT: store i64 [[TMP4305]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4306:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4307:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4306]] acq_rel, align 8 +// CHECK-NEXT: [[TMP4308:%.*]] = icmp ugt i64 [[TMP4307]], [[TMP4306]] +// CHECK-NEXT: [[TMP4309:%.*]] = select i1 [[TMP4308]], i64 [[TMP4306]], i64 [[TMP4307]] +// CHECK-NEXT: store i64 [[TMP4309]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4310:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4311:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4310]] acq_rel, align 8 +// CHECK-NEXT: [[TMP4312:%.*]] = icmp ult i64 [[TMP4311]], [[TMP4310]] +// CHECK-NEXT: [[TMP4313:%.*]] = select i1 [[TMP4312]], i64 [[TMP4310]], i64 [[TMP4311]] +// CHECK-NEXT: store i64 [[TMP4313]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4314:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4315:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4314]] acq_rel, align 8 +// CHECK-NEXT: [[TMP4316:%.*]] = icmp ult i64 [[TMP4315]], [[TMP4314]] +// CHECK-NEXT: [[TMP4317:%.*]] = select i1 [[TMP4316]], i64 [[TMP4314]], i64 [[TMP4315]] +// CHECK-NEXT: store i64 [[TMP4317]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4318:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4319:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4318]] acq_rel, align 8 +// CHECK-NEXT: [[TMP4320:%.*]] = icmp ugt i64 [[TMP4319]], [[TMP4318]] +// CHECK-NEXT: [[TMP4321:%.*]] = select i1 [[TMP4320]], i64 [[TMP4318]], i64 [[TMP4319]] +// CHECK-NEXT: store i64 [[TMP4321]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4322:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4323:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4324:%.*]] = cmpxchg i64* 
[[ULLX]], i64 [[TMP4322]], i64 [[TMP4323]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4325:%.*]] = extractvalue { i64, i1 } [[TMP4324]], 0 +// CHECK-NEXT: [[TMP4326:%.*]] = extractvalue { i64, i1 } [[TMP4324]], 1 +// CHECK-NEXT: [[TMP4327:%.*]] = select i1 [[TMP4326]], i64 [[TMP4322]], i64 [[TMP4325]] +// CHECK-NEXT: store i64 [[TMP4327]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4328:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4329:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4330:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4328]], i64 [[TMP4329]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4331:%.*]] = extractvalue { i64, i1 } [[TMP4330]], 0 +// CHECK-NEXT: [[TMP4332:%.*]] = extractvalue { i64, i1 } [[TMP4330]], 1 +// CHECK-NEXT: [[TMP4333:%.*]] = select i1 [[TMP4332]], i64 [[TMP4328]], i64 [[TMP4331]] +// CHECK-NEXT: store i64 [[TMP4333]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4334:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4335:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4336:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4334]], i64 [[TMP4335]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4337:%.*]] = extractvalue { i64, i1 } [[TMP4336]], 0 +// CHECK-NEXT: [[TMP4338:%.*]] = extractvalue { i64, i1 } [[TMP4336]], 1 +// CHECK-NEXT: br i1 [[TMP4338]], label [[ULLX_ATOMIC_EXIT421:%.*]], label [[ULLX_ATOMIC_CONT422:%.*]] +// CHECK: ullx.atomic.cont422: +// CHECK-NEXT: store i64 [[TMP4337]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT421]] +// CHECK: ullx.atomic.exit421: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4339:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4340:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4341:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4339]], i64 [[TMP4340]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4342:%.*]] = extractvalue { i64, i1 } [[TMP4341]], 0 +// CHECK-NEXT: [[TMP4343:%.*]] = extractvalue { i64, i1 } [[TMP4341]], 1 +// CHECK-NEXT: br i1 [[TMP4343]], label [[ULLX_ATOMIC_EXIT423:%.*]], label [[ULLX_ATOMIC_CONT424:%.*]] +// CHECK: ullx.atomic.cont424: +// CHECK-NEXT: store i64 [[TMP4342]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT423]] +// CHECK: ullx.atomic.exit423: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4344:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4345:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4346:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4344]], i64 [[TMP4345]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4347:%.*]] = extractvalue { i64, i1 } [[TMP4346]], 1 +// CHECK-NEXT: [[TMP4348:%.*]] = zext i1 [[TMP4347]] to i64 +// CHECK-NEXT: store i64 [[TMP4348]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4349:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4350:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4351:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4349]], i64 [[TMP4350]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4352:%.*]] = extractvalue { i64, i1 } [[TMP4351]], 1 +// CHECK-NEXT: [[TMP4353:%.*]] = zext i1 [[TMP4352]] to i64 +// CHECK-NEXT: store i64 [[TMP4353]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void 
@__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4354:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4355:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4356:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4354]], i64 [[TMP4355]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4357:%.*]] = extractvalue { i64, i1 } [[TMP4356]], 0 +// CHECK-NEXT: [[TMP4358:%.*]] = extractvalue { i64, i1 } [[TMP4356]], 1 +// CHECK-NEXT: br i1 [[TMP4358]], label [[ULLX_ATOMIC_EXIT425:%.*]], label [[ULLX_ATOMIC_CONT426:%.*]] +// CHECK: ullx.atomic.cont426: +// CHECK-NEXT: store i64 [[TMP4357]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT425]] +// CHECK: ullx.atomic.exit425: +// CHECK-NEXT: [[TMP4359:%.*]] = extractvalue { i64, i1 } [[TMP4356]], 1 +// CHECK-NEXT: [[TMP4360:%.*]] = zext i1 [[TMP4359]] to i64 +// CHECK-NEXT: store i64 [[TMP4360]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4361:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4362:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4363:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4361]], i64 [[TMP4362]] acq_rel acquire, align 8 +// CHECK-NEXT: [[TMP4364:%.*]] = extractvalue { i64, i1 } [[TMP4363]], 0 +// CHECK-NEXT: [[TMP4365:%.*]] = extractvalue { i64, i1 } [[TMP4363]], 1 +// CHECK-NEXT: br i1 [[TMP4365]], label [[ULLX_ATOMIC_EXIT427:%.*]], label [[ULLX_ATOMIC_CONT428:%.*]] +// CHECK: ullx.atomic.cont428: +// CHECK-NEXT: store i64 [[TMP4364]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT427]] +// CHECK: ullx.atomic.exit427: +// CHECK-NEXT: [[TMP4366:%.*]] = extractvalue { i64, i1 } [[TMP4363]], 1 +// CHECK-NEXT: [[TMP4367:%.*]] = zext i1 [[TMP4366]] to i64 +// CHECK-NEXT: store i64 [[TMP4367]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4368:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4369:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4368]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP4369]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4370:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4371:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4370]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP4371]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4372:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4373:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4372]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP4373]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4374:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4375:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4374]] acquire, align 8 +// CHECK-NEXT: store i64 [[TMP4375]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4376:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4377:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4378:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4376]], i64 [[TMP4377]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4379:%.*]] = extractvalue { i64, i1 } [[TMP4378]], 0 +// CHECK-NEXT: store i64 [[TMP4379]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4380:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4381:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4382:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4380]], i64 [[TMP4381]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4383:%.*]] = extractvalue { i64, i1 } [[TMP4382]], 0 +// CHECK-NEXT: 
store i64 [[TMP4383]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4384:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4385:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4384]] acquire, align 8 +// CHECK-NEXT: [[TMP4386:%.*]] = icmp ugt i64 [[TMP4385]], [[TMP4384]] +// CHECK-NEXT: [[TMP4387:%.*]] = select i1 [[TMP4386]], i64 [[TMP4384]], i64 [[TMP4385]] +// CHECK-NEXT: store i64 [[TMP4387]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4388:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4389:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4388]] acquire, align 8 +// CHECK-NEXT: [[TMP4390:%.*]] = icmp ult i64 [[TMP4389]], [[TMP4388]] +// CHECK-NEXT: [[TMP4391:%.*]] = select i1 [[TMP4390]], i64 [[TMP4388]], i64 [[TMP4389]] +// CHECK-NEXT: store i64 [[TMP4391]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4392:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4393:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4392]] acquire, align 8 +// CHECK-NEXT: [[TMP4394:%.*]] = icmp ult i64 [[TMP4393]], [[TMP4392]] +// CHECK-NEXT: [[TMP4395:%.*]] = select i1 [[TMP4394]], i64 [[TMP4392]], i64 [[TMP4393]] +// CHECK-NEXT: store i64 [[TMP4395]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4396:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4397:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4396]] acquire, align 8 +// CHECK-NEXT: [[TMP4398:%.*]] = icmp ugt i64 [[TMP4397]], [[TMP4396]] +// CHECK-NEXT: [[TMP4399:%.*]] = select i1 [[TMP4398]], i64 [[TMP4396]], i64 [[TMP4397]] +// CHECK-NEXT: store i64 [[TMP4399]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4400:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4401:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4402:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4400]], i64 [[TMP4401]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4403:%.*]] = extractvalue { i64, i1 } [[TMP4402]], 0 +// CHECK-NEXT: [[TMP4404:%.*]] = extractvalue { i64, i1 } [[TMP4402]], 1 +// CHECK-NEXT: [[TMP4405:%.*]] = select i1 [[TMP4404]], i64 [[TMP4400]], i64 [[TMP4403]] +// CHECK-NEXT: store i64 [[TMP4405]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4406:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4407:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4408:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4406]], i64 [[TMP4407]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4409:%.*]] = extractvalue { i64, i1 } [[TMP4408]], 0 +// CHECK-NEXT: [[TMP4410:%.*]] = extractvalue { i64, i1 } [[TMP4408]], 1 +// CHECK-NEXT: [[TMP4411:%.*]] = select i1 [[TMP4410]], i64 [[TMP4406]], i64 [[TMP4409]] +// CHECK-NEXT: store i64 [[TMP4411]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4412:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4413:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4414:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4412]], i64 [[TMP4413]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4415:%.*]] = extractvalue { i64, i1 } [[TMP4414]], 0 +// CHECK-NEXT: [[TMP4416:%.*]] = extractvalue { i64, i1 } [[TMP4414]], 1 +// CHECK-NEXT: br i1 [[TMP4416]], label [[ULLX_ATOMIC_EXIT429:%.*]], label [[ULLX_ATOMIC_CONT430:%.*]] +// CHECK: ullx.atomic.cont430: +// CHECK-NEXT: store i64 [[TMP4415]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT429]] +// CHECK: ullx.atomic.exit429: +// CHECK-NEXT: [[TMP4417:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4418:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4419:%.*]] = cmpxchg i64* 
[[ULLX]], i64 [[TMP4417]], i64 [[TMP4418]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4420:%.*]] = extractvalue { i64, i1 } [[TMP4419]], 0 +// CHECK-NEXT: [[TMP4421:%.*]] = extractvalue { i64, i1 } [[TMP4419]], 1 +// CHECK-NEXT: br i1 [[TMP4421]], label [[ULLX_ATOMIC_EXIT431:%.*]], label [[ULLX_ATOMIC_CONT432:%.*]] +// CHECK: ullx.atomic.cont432: +// CHECK-NEXT: store i64 [[TMP4420]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT431]] +// CHECK: ullx.atomic.exit431: +// CHECK-NEXT: [[TMP4422:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4423:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4424:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4422]], i64 [[TMP4423]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4425:%.*]] = extractvalue { i64, i1 } [[TMP4424]], 1 +// CHECK-NEXT: [[TMP4426:%.*]] = zext i1 [[TMP4425]] to i64 +// CHECK-NEXT: store i64 [[TMP4426]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4427:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4428:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4429:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4427]], i64 [[TMP4428]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4430:%.*]] = extractvalue { i64, i1 } [[TMP4429]], 1 +// CHECK-NEXT: [[TMP4431:%.*]] = zext i1 [[TMP4430]] to i64 +// CHECK-NEXT: store i64 [[TMP4431]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4432:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4433:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4434:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4432]], i64 [[TMP4433]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4435:%.*]] = extractvalue { i64, i1 } [[TMP4434]], 0 +// CHECK-NEXT: [[TMP4436:%.*]] = extractvalue { i64, i1 } [[TMP4434]], 1 +// CHECK-NEXT: br i1 [[TMP4436]], label [[ULLX_ATOMIC_EXIT433:%.*]], label [[ULLX_ATOMIC_CONT434:%.*]] +// CHECK: ullx.atomic.cont434: +// CHECK-NEXT: store i64 [[TMP4435]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT433]] +// CHECK: ullx.atomic.exit433: +// CHECK-NEXT: [[TMP4437:%.*]] = extractvalue { i64, i1 } [[TMP4434]], 1 +// CHECK-NEXT: [[TMP4438:%.*]] = zext i1 [[TMP4437]] to i64 +// CHECK-NEXT: store i64 [[TMP4438]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4439:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4440:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4441:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4439]], i64 [[TMP4440]] acquire acquire, align 8 +// CHECK-NEXT: [[TMP4442:%.*]] = extractvalue { i64, i1 } [[TMP4441]], 0 +// CHECK-NEXT: [[TMP4443:%.*]] = extractvalue { i64, i1 } [[TMP4441]], 1 +// CHECK-NEXT: br i1 [[TMP4443]], label [[ULLX_ATOMIC_EXIT435:%.*]], label [[ULLX_ATOMIC_CONT436:%.*]] +// CHECK: ullx.atomic.cont436: +// CHECK-NEXT: store i64 [[TMP4442]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT435]] +// CHECK: ullx.atomic.exit435: +// CHECK-NEXT: [[TMP4444:%.*]] = extractvalue { i64, i1 } [[TMP4441]], 1 +// CHECK-NEXT: [[TMP4445:%.*]] = zext i1 [[TMP4444]] to i64 +// CHECK-NEXT: store i64 [[TMP4445]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4446:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4447:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4446]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP4447]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4448:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4449:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4448]] monotonic, align 8 +// CHECK-NEXT: store i64 
[[TMP4449]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4450:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4451:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4450]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP4451]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4452:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4453:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4452]] monotonic, align 8 +// CHECK-NEXT: store i64 [[TMP4453]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4454:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4455:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4456:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4454]], i64 [[TMP4455]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4457:%.*]] = extractvalue { i64, i1 } [[TMP4456]], 0 +// CHECK-NEXT: store i64 [[TMP4457]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4458:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4459:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4460:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4458]], i64 [[TMP4459]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4461:%.*]] = extractvalue { i64, i1 } [[TMP4460]], 0 +// CHECK-NEXT: store i64 [[TMP4461]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4462:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4463:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4462]] monotonic, align 8 +// CHECK-NEXT: [[TMP4464:%.*]] = icmp ugt i64 [[TMP4463]], [[TMP4462]] +// CHECK-NEXT: [[TMP4465:%.*]] = select i1 [[TMP4464]], i64 [[TMP4462]], i64 [[TMP4463]] +// CHECK-NEXT: store i64 [[TMP4465]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4466:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4467:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4466]] monotonic, align 8 +// CHECK-NEXT: [[TMP4468:%.*]] = icmp ult i64 [[TMP4467]], [[TMP4466]] +// CHECK-NEXT: [[TMP4469:%.*]] = select i1 [[TMP4468]], i64 [[TMP4466]], i64 [[TMP4467]] +// CHECK-NEXT: store i64 [[TMP4469]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4470:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4471:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4470]] monotonic, align 8 +// CHECK-NEXT: [[TMP4472:%.*]] = icmp ult i64 [[TMP4471]], [[TMP4470]] +// CHECK-NEXT: [[TMP4473:%.*]] = select i1 [[TMP4472]], i64 [[TMP4470]], i64 [[TMP4471]] +// CHECK-NEXT: store i64 [[TMP4473]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4474:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4475:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4474]] monotonic, align 8 +// CHECK-NEXT: [[TMP4476:%.*]] = icmp ugt i64 [[TMP4475]], [[TMP4474]] +// CHECK-NEXT: [[TMP4477:%.*]] = select i1 [[TMP4476]], i64 [[TMP4474]], i64 [[TMP4475]] +// CHECK-NEXT: store i64 [[TMP4477]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4478:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4479:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4480:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4478]], i64 [[TMP4479]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4481:%.*]] = extractvalue { i64, i1 } [[TMP4480]], 0 +// CHECK-NEXT: [[TMP4482:%.*]] = extractvalue { i64, i1 } [[TMP4480]], 1 +// CHECK-NEXT: [[TMP4483:%.*]] = select i1 [[TMP4482]], i64 [[TMP4478]], i64 [[TMP4481]] +// CHECK-NEXT: store i64 [[TMP4483]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4484:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4485:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4486:%.*]] = 
cmpxchg i64* [[ULLX]], i64 [[TMP4484]], i64 [[TMP4485]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4487:%.*]] = extractvalue { i64, i1 } [[TMP4486]], 0 +// CHECK-NEXT: [[TMP4488:%.*]] = extractvalue { i64, i1 } [[TMP4486]], 1 +// CHECK-NEXT: [[TMP4489:%.*]] = select i1 [[TMP4488]], i64 [[TMP4484]], i64 [[TMP4487]] +// CHECK-NEXT: store i64 [[TMP4489]], i64* [[ULLV]], align 8 +// CHECK-NEXT: [[TMP4490:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4491:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4492:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4490]], i64 [[TMP4491]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4493:%.*]] = extractvalue { i64, i1 } [[TMP4492]], 0 +// CHECK-NEXT: [[TMP4494:%.*]] = extractvalue { i64, i1 } [[TMP4492]], 1 +// CHECK-NEXT: br i1 [[TMP4494]], label [[ULLX_ATOMIC_EXIT437:%.*]], label [[ULLX_ATOMIC_CONT438:%.*]] +// CHECK: ullx.atomic.cont438: +// CHECK-NEXT: store i64 [[TMP4493]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT437]] +// CHECK: ullx.atomic.exit437: +// CHECK-NEXT: [[TMP4495:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4496:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4497:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4495]], i64 [[TMP4496]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4498:%.*]] = extractvalue { i64, i1 } [[TMP4497]], 0 +// CHECK-NEXT: [[TMP4499:%.*]] = extractvalue { i64, i1 } [[TMP4497]], 1 +// CHECK-NEXT: br i1 [[TMP4499]], label [[ULLX_ATOMIC_EXIT439:%.*]], label [[ULLX_ATOMIC_CONT440:%.*]] +// CHECK: ullx.atomic.cont440: +// CHECK-NEXT: store i64 [[TMP4498]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT439]] +// CHECK: ullx.atomic.exit439: +// CHECK-NEXT: [[TMP4500:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4501:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4502:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4500]], i64 [[TMP4501]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4503:%.*]] = extractvalue { i64, i1 } [[TMP4502]], 1 +// CHECK-NEXT: [[TMP4504:%.*]] = zext i1 [[TMP4503]] to i64 +// CHECK-NEXT: store i64 [[TMP4504]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4505:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4506:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4507:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4505]], i64 [[TMP4506]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4508:%.*]] = extractvalue { i64, i1 } [[TMP4507]], 1 +// CHECK-NEXT: [[TMP4509:%.*]] = zext i1 [[TMP4508]] to i64 +// CHECK-NEXT: store i64 [[TMP4509]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4510:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4511:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4512:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4510]], i64 [[TMP4511]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4513:%.*]] = extractvalue { i64, i1 } [[TMP4512]], 0 +// CHECK-NEXT: [[TMP4514:%.*]] = extractvalue { i64, i1 } [[TMP4512]], 1 +// CHECK-NEXT: br i1 [[TMP4514]], label [[ULLX_ATOMIC_EXIT441:%.*]], label [[ULLX_ATOMIC_CONT442:%.*]] +// CHECK: ullx.atomic.cont442: +// CHECK-NEXT: store i64 [[TMP4513]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT441]] +// CHECK: ullx.atomic.exit441: +// CHECK-NEXT: [[TMP4515:%.*]] = extractvalue { i64, i1 } [[TMP4512]], 1 +// CHECK-NEXT: [[TMP4516:%.*]] = zext i1 [[TMP4515]] to i64 +// CHECK-NEXT: store i64 [[TMP4516]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4517:%.*]] 
= load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4518:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4519:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4517]], i64 [[TMP4518]] monotonic monotonic, align 8 +// CHECK-NEXT: [[TMP4520:%.*]] = extractvalue { i64, i1 } [[TMP4519]], 0 +// CHECK-NEXT: [[TMP4521:%.*]] = extractvalue { i64, i1 } [[TMP4519]], 1 +// CHECK-NEXT: br i1 [[TMP4521]], label [[ULLX_ATOMIC_EXIT443:%.*]], label [[ULLX_ATOMIC_CONT444:%.*]] +// CHECK: ullx.atomic.cont444: +// CHECK-NEXT: store i64 [[TMP4520]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT443]] +// CHECK: ullx.atomic.exit443: +// CHECK-NEXT: [[TMP4522:%.*]] = extractvalue { i64, i1 } [[TMP4519]], 1 +// CHECK-NEXT: [[TMP4523:%.*]] = zext i1 [[TMP4522]] to i64 +// CHECK-NEXT: store i64 [[TMP4523]], i64* [[ULLR]], align 8 +// CHECK-NEXT: [[TMP4524:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4525:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4524]] release, align 8 +// CHECK-NEXT: store i64 [[TMP4525]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4526:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4527:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4526]] release, align 8 +// CHECK-NEXT: store i64 [[TMP4527]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4528:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4529:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4528]] release, align 8 +// CHECK-NEXT: store i64 [[TMP4529]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4530:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4531:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4530]] release, align 8 +// CHECK-NEXT: store i64 [[TMP4531]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4532:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4533:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4534:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4532]], i64 [[TMP4533]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4535:%.*]] = extractvalue { i64, i1 } [[TMP4534]], 0 +// CHECK-NEXT: store i64 [[TMP4535]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4536:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4537:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4538:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4536]], i64 [[TMP4537]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4539:%.*]] = extractvalue { i64, i1 } [[TMP4538]], 0 +// CHECK-NEXT: store i64 [[TMP4539]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4540:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4541:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4540]] release, align 8 +// CHECK-NEXT: [[TMP4542:%.*]] = icmp ugt i64 [[TMP4541]], [[TMP4540]] +// CHECK-NEXT: [[TMP4543:%.*]] = select i1 [[TMP4542]], i64 [[TMP4540]], i64 [[TMP4541]] +// CHECK-NEXT: store i64 [[TMP4543]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4544:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4545:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4544]] release, 
align 8 +// CHECK-NEXT: [[TMP4546:%.*]] = icmp ult i64 [[TMP4545]], [[TMP4544]] +// CHECK-NEXT: [[TMP4547:%.*]] = select i1 [[TMP4546]], i64 [[TMP4544]], i64 [[TMP4545]] +// CHECK-NEXT: store i64 [[TMP4547]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4548:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4549:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4548]] release, align 8 +// CHECK-NEXT: [[TMP4550:%.*]] = icmp ult i64 [[TMP4549]], [[TMP4548]] +// CHECK-NEXT: [[TMP4551:%.*]] = select i1 [[TMP4550]], i64 [[TMP4548]], i64 [[TMP4549]] +// CHECK-NEXT: store i64 [[TMP4551]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4552:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4553:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4552]] release, align 8 +// CHECK-NEXT: [[TMP4554:%.*]] = icmp ugt i64 [[TMP4553]], [[TMP4552]] +// CHECK-NEXT: [[TMP4555:%.*]] = select i1 [[TMP4554]], i64 [[TMP4552]], i64 [[TMP4553]] +// CHECK-NEXT: store i64 [[TMP4555]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4556:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4557:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4558:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4556]], i64 [[TMP4557]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4559:%.*]] = extractvalue { i64, i1 } [[TMP4558]], 0 +// CHECK-NEXT: [[TMP4560:%.*]] = extractvalue { i64, i1 } [[TMP4558]], 1 +// CHECK-NEXT: [[TMP4561:%.*]] = select i1 [[TMP4560]], i64 [[TMP4556]], i64 [[TMP4559]] +// CHECK-NEXT: store i64 [[TMP4561]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4562:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4563:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4564:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4562]], i64 [[TMP4563]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4565:%.*]] = extractvalue { i64, i1 } [[TMP4564]], 0 +// CHECK-NEXT: [[TMP4566:%.*]] = extractvalue { i64, i1 } [[TMP4564]], 1 +// CHECK-NEXT: [[TMP4567:%.*]] = select i1 [[TMP4566]], i64 [[TMP4562]], i64 [[TMP4565]] +// CHECK-NEXT: store i64 [[TMP4567]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4568:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4569:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4570:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4568]], i64 [[TMP4569]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4571:%.*]] = extractvalue { i64, i1 } [[TMP4570]], 0 +// CHECK-NEXT: [[TMP4572:%.*]] = extractvalue { i64, i1 } [[TMP4570]], 1 +// CHECK-NEXT: br i1 [[TMP4572]], label [[ULLX_ATOMIC_EXIT445:%.*]], label [[ULLX_ATOMIC_CONT446:%.*]] +// CHECK: ullx.atomic.cont446: +// CHECK-NEXT: store i64 [[TMP4571]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT445]] +// CHECK: ullx.atomic.exit445: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4573:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4574:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4575:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4573]], i64 [[TMP4574]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4576:%.*]] = extractvalue { i64, i1 } [[TMP4575]], 0 +// CHECK-NEXT: 
[[TMP4577:%.*]] = extractvalue { i64, i1 } [[TMP4575]], 1 +// CHECK-NEXT: br i1 [[TMP4577]], label [[ULLX_ATOMIC_EXIT447:%.*]], label [[ULLX_ATOMIC_CONT448:%.*]] +// CHECK: ullx.atomic.cont448: +// CHECK-NEXT: store i64 [[TMP4576]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT447]] +// CHECK: ullx.atomic.exit447: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4578:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4579:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4580:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4578]], i64 [[TMP4579]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4581:%.*]] = extractvalue { i64, i1 } [[TMP4580]], 1 +// CHECK-NEXT: [[TMP4582:%.*]] = zext i1 [[TMP4581]] to i64 +// CHECK-NEXT: store i64 [[TMP4582]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4583:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4584:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4585:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4583]], i64 [[TMP4584]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4586:%.*]] = extractvalue { i64, i1 } [[TMP4585]], 1 +// CHECK-NEXT: [[TMP4587:%.*]] = zext i1 [[TMP4586]] to i64 +// CHECK-NEXT: store i64 [[TMP4587]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4588:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4589:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4590:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4588]], i64 [[TMP4589]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4591:%.*]] = extractvalue { i64, i1 } [[TMP4590]], 0 +// CHECK-NEXT: [[TMP4592:%.*]] = extractvalue { i64, i1 } [[TMP4590]], 1 +// CHECK-NEXT: br i1 [[TMP4592]], label [[ULLX_ATOMIC_EXIT449:%.*]], label [[ULLX_ATOMIC_CONT450:%.*]] +// CHECK: ullx.atomic.cont450: +// CHECK-NEXT: store i64 [[TMP4591]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT449]] +// CHECK: ullx.atomic.exit449: +// CHECK-NEXT: [[TMP4593:%.*]] = extractvalue { i64, i1 } [[TMP4590]], 1 +// CHECK-NEXT: [[TMP4594:%.*]] = zext i1 [[TMP4593]] to i64 +// CHECK-NEXT: store i64 [[TMP4594]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4595:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4596:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4597:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4595]], i64 [[TMP4596]] release monotonic, align 8 +// CHECK-NEXT: [[TMP4598:%.*]] = extractvalue { i64, i1 } [[TMP4597]], 0 +// CHECK-NEXT: [[TMP4599:%.*]] = extractvalue { i64, i1 } [[TMP4597]], 1 +// CHECK-NEXT: br i1 [[TMP4599]], label [[ULLX_ATOMIC_EXIT451:%.*]], label [[ULLX_ATOMIC_CONT452:%.*]] +// CHECK: ullx.atomic.cont452: +// CHECK-NEXT: store i64 [[TMP4598]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT451]] +// CHECK: ullx.atomic.exit451: +// CHECK-NEXT: [[TMP4600:%.*]] = extractvalue { i64, i1 } [[TMP4597]], 1 +// CHECK-NEXT: [[TMP4601:%.*]] = zext i1 [[TMP4600]] to i64 +// CHECK-NEXT: store i64 [[TMP4601]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4602:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4603:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4602]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP4603]], i64* [[ULLV]], align 8 
+// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4604:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4605:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4604]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP4605]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4606:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4607:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4606]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP4607]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4608:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4609:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4608]] seq_cst, align 8 +// CHECK-NEXT: store i64 [[TMP4609]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4610:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4611:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4612:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4610]], i64 [[TMP4611]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4613:%.*]] = extractvalue { i64, i1 } [[TMP4612]], 0 +// CHECK-NEXT: store i64 [[TMP4613]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4614:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4615:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4616:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4614]], i64 [[TMP4615]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4617:%.*]] = extractvalue { i64, i1 } [[TMP4616]], 0 +// CHECK-NEXT: store i64 [[TMP4617]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4618:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4619:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4618]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4620:%.*]] = icmp ugt i64 [[TMP4619]], [[TMP4618]] +// CHECK-NEXT: [[TMP4621:%.*]] = select i1 [[TMP4620]], i64 [[TMP4618]], i64 [[TMP4619]] +// CHECK-NEXT: store i64 [[TMP4621]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4622:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4623:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4622]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4624:%.*]] = icmp ult i64 [[TMP4623]], [[TMP4622]] +// CHECK-NEXT: [[TMP4625:%.*]] = select i1 [[TMP4624]], i64 [[TMP4622]], i64 [[TMP4623]] +// CHECK-NEXT: store i64 [[TMP4625]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4626:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4627:%.*]] = atomicrmw umin i64* [[ULLX]], i64 [[TMP4626]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4628:%.*]] = icmp ult i64 [[TMP4627]], [[TMP4626]] +// CHECK-NEXT: [[TMP4629:%.*]] = select i1 [[TMP4628]], i64 [[TMP4626]], i64 [[TMP4627]] +// CHECK-NEXT: store i64 [[TMP4629]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4630:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4631:%.*]] = atomicrmw umax i64* [[ULLX]], i64 [[TMP4630]] seq_cst, align 8 +// CHECK-NEXT: [[TMP4632:%.*]] = icmp ugt i64 [[TMP4631]], [[TMP4630]] +// CHECK-NEXT: [[TMP4633:%.*]] = select i1 [[TMP4632]], i64 [[TMP4630]], i64 
[[TMP4631]] +// CHECK-NEXT: store i64 [[TMP4633]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4634:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4635:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4636:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4634]], i64 [[TMP4635]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4637:%.*]] = extractvalue { i64, i1 } [[TMP4636]], 0 +// CHECK-NEXT: [[TMP4638:%.*]] = extractvalue { i64, i1 } [[TMP4636]], 1 +// CHECK-NEXT: [[TMP4639:%.*]] = select i1 [[TMP4638]], i64 [[TMP4634]], i64 [[TMP4637]] +// CHECK-NEXT: store i64 [[TMP4639]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4640:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4641:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4642:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4640]], i64 [[TMP4641]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4643:%.*]] = extractvalue { i64, i1 } [[TMP4642]], 0 +// CHECK-NEXT: [[TMP4644:%.*]] = extractvalue { i64, i1 } [[TMP4642]], 1 +// CHECK-NEXT: [[TMP4645:%.*]] = select i1 [[TMP4644]], i64 [[TMP4640]], i64 [[TMP4643]] +// CHECK-NEXT: store i64 [[TMP4645]], i64* [[ULLV]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4646:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4647:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4648:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4646]], i64 [[TMP4647]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4649:%.*]] = extractvalue { i64, i1 } [[TMP4648]], 0 +// CHECK-NEXT: [[TMP4650:%.*]] = extractvalue { i64, i1 } [[TMP4648]], 1 +// CHECK-NEXT: br i1 [[TMP4650]], label [[ULLX_ATOMIC_EXIT453:%.*]], label [[ULLX_ATOMIC_CONT454:%.*]] +// CHECK: ullx.atomic.cont454: +// CHECK-NEXT: store i64 [[TMP4649]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT453]] +// CHECK: ullx.atomic.exit453: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4651:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4652:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4653:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4651]], i64 [[TMP4652]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4654:%.*]] = extractvalue { i64, i1 } [[TMP4653]], 0 +// CHECK-NEXT: [[TMP4655:%.*]] = extractvalue { i64, i1 } [[TMP4653]], 1 +// CHECK-NEXT: br i1 [[TMP4655]], label [[ULLX_ATOMIC_EXIT455:%.*]], label [[ULLX_ATOMIC_CONT456:%.*]] +// CHECK: ullx.atomic.cont456: +// CHECK-NEXT: store i64 [[TMP4654]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT455]] +// CHECK: ullx.atomic.exit455: +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4656:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4657:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4658:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4656]], i64 [[TMP4657]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4659:%.*]] = extractvalue { i64, i1 } [[TMP4658]], 1 +// CHECK-NEXT: [[TMP4660:%.*]] = zext i1 [[TMP4659]] to i64 +// CHECK-NEXT: store i64 [[TMP4660]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4661:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4662:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4663:%.*]] = 
cmpxchg i64* [[ULLX]], i64 [[TMP4661]], i64 [[TMP4662]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4664:%.*]] = extractvalue { i64, i1 } [[TMP4663]], 1 +// CHECK-NEXT: [[TMP4665:%.*]] = zext i1 [[TMP4664]] to i64 +// CHECK-NEXT: store i64 [[TMP4665]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4666:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4667:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4668:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4666]], i64 [[TMP4667]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4669:%.*]] = extractvalue { i64, i1 } [[TMP4668]], 0 +// CHECK-NEXT: [[TMP4670:%.*]] = extractvalue { i64, i1 } [[TMP4668]], 1 +// CHECK-NEXT: br i1 [[TMP4670]], label [[ULLX_ATOMIC_EXIT457:%.*]], label [[ULLX_ATOMIC_CONT458:%.*]] +// CHECK: ullx.atomic.cont458: +// CHECK-NEXT: store i64 [[TMP4669]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT457]] +// CHECK: ullx.atomic.exit457: +// CHECK-NEXT: [[TMP4671:%.*]] = extractvalue { i64, i1 } [[TMP4668]], 1 +// CHECK-NEXT: [[TMP4672:%.*]] = zext i1 [[TMP4671]] to i64 +// CHECK-NEXT: store i64 [[TMP4672]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: [[TMP4673:%.*]] = load i64, i64* [[ULLE]], align 8 +// CHECK-NEXT: [[TMP4674:%.*]] = load i64, i64* [[ULLD]], align 8 +// CHECK-NEXT: [[TMP4675:%.*]] = cmpxchg i64* [[ULLX]], i64 [[TMP4673]], i64 [[TMP4674]] seq_cst seq_cst, align 8 +// CHECK-NEXT: [[TMP4676:%.*]] = extractvalue { i64, i1 } [[TMP4675]], 0 +// CHECK-NEXT: [[TMP4677:%.*]] = extractvalue { i64, i1 } [[TMP4675]], 1 +// CHECK-NEXT: br i1 [[TMP4677]], label [[ULLX_ATOMIC_EXIT459:%.*]], label [[ULLX_ATOMIC_CONT460:%.*]] +// CHECK: ullx.atomic.cont460: +// CHECK-NEXT: store i64 [[TMP4676]], i64* [[ULLV]], align 8 +// CHECK-NEXT: br label [[ULLX_ATOMIC_EXIT459]] +// CHECK: ullx.atomic.exit459: +// CHECK-NEXT: [[TMP4678:%.*]] = extractvalue { i64, i1 } [[TMP4675]], 1 +// CHECK-NEXT: [[TMP4679:%.*]] = zext i1 [[TMP4678]] to i64 +// CHECK-NEXT: store i64 [[TMP4679]], i64* [[ULLR]], align 8 +// CHECK-NEXT: call void @__kmpc_flush(%struct.ident_t* @[[GLOB1]]) +// CHECK-NEXT: ret void
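For readers scanning the autogenerated CHECK lines above: they verify the IR emitted for the compare-capture forms exercised by the test (cmpxchg with the old value or the success bit captured, atomicrmw umin/umax with the captured value reconstructed via icmp + select, and a conditional branch around the store for the fail-only capture). As a minimal illustrative sketch only, not part of the test file, and with the variable names x, v, r, e, d assumed rather than taken from the test's declarations, the source-level constructs behind these patterns look roughly like this:

    unsigned long long x, v, r, e, d;

    // Value of 'x' captured in 'v': lowered to cmpxchg plus a select (or a
    // conditional branch) feeding the store into 'v'.
    #pragma omp atomic compare capture
    { v = x; if (x == e) { x = d; } }

    // Success flag captured in 'r': the cmpxchg success bit is zext'ed to the
    // type of 'r' and stored.
    #pragma omp atomic compare capture
    { r = x == e; if (r) { x = d; } }

    // 'v' updated only when the comparison fails: emitted as a conditional
    // branch around the store (the *.atomic.cont / *.atomic.exit blocks above).
    #pragma omp atomic compare capture
    { if (x == e) { x = d; } else { v = x; } }

    // min/max forms with capture: lowered to atomicrmw umin/umax, with
    // icmp + select reconstructing the value stored into 'v'.
    #pragma omp atomic compare capture
    { if (x < e) { x = e; } v = x; }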