diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -599,6 +599,12 @@
     return Result;
   }
 
+  /// Determine the pattern that a select with the given compare as its
+  /// predicate and given values as its true/false operands would match.
+  SelectPatternResult matchDecomposedSelectPattern(
+      CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
+      Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
+
   /// Return the canonical comparison predicate for the specified
   /// minimum/maximum flavor.
   CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -5067,11 +5067,19 @@
   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
 
+  Value *TrueVal = SI->getTrueValue();
+  Value *FalseVal = SI->getFalseValue();
+
+  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
+                                            CastOp, Depth);
+}
+
+SelectPatternResult llvm::matchDecomposedSelectPattern(
+    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
+    Instruction::CastOps *CastOp, unsigned Depth) {
   CmpInst::Predicate Pred = CmpI->getPredicate();
   Value *CmpLHS = CmpI->getOperand(0);
   Value *CmpRHS = CmpI->getOperand(1);
-  Value *TrueVal = SI->getTrueValue();
-  Value *FalseVal = SI->getFalseValue();
   FastMathFlags FMF;
   if (isa<FPMathOperator>(CmpI))
     FMF = CmpI->getFastMathFlags();
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -130,19 +130,31 @@
 } // end namespace llvm
 
-/// Match a 'select' including an optional 'not' of the condition.
-static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond,
-                                           Value *&T, Value *&F) {
-  if (match(V, m_Select(m_Value(Cond), m_Value(T), m_Value(F)))) {
-    // Look through a 'not' of the condition operand by swapping true/false.
-    Value *CondNot;
-    if (match(Cond, m_Not(m_Value(CondNot)))) {
-      Cond = CondNot;
-      std::swap(T, F);
-    }
-    return true;
+/// Match a 'select' including an optional 'not' of the condition.
+static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
+                                           Value *&B,
+                                           SelectPatternFlavor &Flavor) {
+  // Return false if V is not even a select.
+  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
+    return false;
+
+  // Look through a 'not' of the condition operand by swapping A/B.
+  Value *CondNot;
+  if (match(Cond, m_Not(m_Value(CondNot)))) {
+    Cond = CondNot;
+    std::swap(A, B);
   }
-  return false;
+
+  // Set flavor if we find a match, or set it to unknown otherwise; in
+  // either case, return true to indicate that this is a select we can
+  // process.
+  Flavor = SPF_UNKNOWN;
+  if (auto *CmpI = dyn_cast<CmpInst>(Cond))
+    Flavor = matchDecomposedSelectPattern(CmpI, A, B, A, B).Flavor;
+  else
+    Flavor = SPF_UNKNOWN;
+
+  return true;
 }
 
 unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
@@ -168,40 +180,41 @@
     return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
   }
 
-  // Hash min/max/abs (cmp + select) to allow for commuted operands.
-  // Min/max may also have non-canonical compare predicate (eg, the compare for
-  // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the
-  // compare.
-  Value *A, *B;
-  SelectPatternFlavor SPF = matchSelectPattern(Inst, A, B).Flavor;
-  // TODO: We should also detect FP min/max.
-  if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
-      SPF == SPF_UMIN || SPF == SPF_UMAX) {
-    if (A > B)
-      std::swap(A, B);
-    return hash_combine(Inst->getOpcode(), SPF, A, B);
-  }
-  if (SPF == SPF_ABS || SPF == SPF_NABS) {
-    // ABS/NABS always puts the input in A and its negation in B.
-    return hash_combine(Inst->getOpcode(), SPF, A, B);
-  }
-
-  // Hash general selects to allow matching commuted true/false operands.
-  Value *Cond, *TVal, *FVal;
-  if (matchSelectWithOptionalNotCond(Inst, Cond, TVal, FVal)) {
+  SelectPatternFlavor SPF;
+  Value *Cond, *A, *B;
+  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
+    // Hash min/max/abs (cmp + select) to allow for commuted operands.
+    // Min/max may also have non-canonical compare predicate (eg, the compare
+    // for smin may use 'sgt' rather than 'slt'), and non-canonical operands
+    // in the compare.
+    // TODO: We should also detect FP min/max.
+    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
+        SPF == SPF_UMIN || SPF == SPF_UMAX) {
+      if (A > B)
+        std::swap(A, B);
+      return hash_combine(Inst->getOpcode(), SPF, A, B);
+    }
+    if (SPF == SPF_ABS || SPF == SPF_NABS) {
+      // ABS/NABS always puts the input in A and its negation in B.
+      return hash_combine(Inst->getOpcode(), SPF, A, B);
+    }
+
+    // Hash general selects to allow matching commuted true/false operands.
+    // If we do not have a compare as the condition, just hash in the condition.
     CmpInst::Predicate Pred;
     Value *X, *Y;
     if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
-      return hash_combine(Inst->getOpcode(), Cond, TVal, FVal);
+      return hash_combine(Inst->getOpcode(), Cond, A, B);
 
     // Similar to cmp normalization (above) - canonicalize the predicate value:
-    // select (icmp Pred, X, Y), T, F --> select (icmp InvPred, X, Y), F, T
+    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
     if (CmpInst::getInversePredicate(Pred) < Pred) {
       Pred = CmpInst::getInversePredicate(Pred);
-      std::swap(TVal, FVal);
+      std::swap(A, B);
     }
-    return hash_combine(Inst->getOpcode(), Pred, X, Y, TVal, FVal);
+    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
   }
 
   if (CastInst *CI = dyn_cast<CastInst>(Inst))
@@ -227,7 +240,7 @@
                       hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
 }
 
-bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
+static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
   Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
 
   if (LHS.isSentinel() || RHS.isSentinel())
@@ -263,39 +276,47 @@
 
   // Min/max/abs can occur with commuted operands, non-canonical predicates,
   // and/or non-canonical operands.
-  Value *LHSA, *LHSB;
-  SelectPatternFlavor LSPF = matchSelectPattern(LHSI, LHSA, LHSB).Flavor;
-  // TODO: We should also detect FP min/max.
-  if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
-      LSPF == SPF_UMIN || LSPF == SPF_UMAX ||
-      LSPF == SPF_ABS || LSPF == SPF_NABS) {
-    Value *RHSA, *RHSB;
-    SelectPatternFlavor RSPF = matchSelectPattern(RHSI, RHSA, RHSB).Flavor;
+  // Selects can be non-trivially equivalent via inverted conditions and swaps.
+  SelectPatternFlavor LSPF, RSPF;
+  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
+  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
+      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
     if (LSPF == RSPF) {
-      // Abs results are placed in a defined order by matchSelectPattern.
-      if (LSPF == SPF_ABS || LSPF == SPF_NABS)
+      // TODO: We should also detect FP min/max.
+      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
+          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
+        return ((LHSA == RHSA && LHSB == RHSB) ||
+                (LHSA == RHSB && LHSB == RHSA));
+
+      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
+        // Abs results are placed in a defined order by matchSelectPattern.
         return LHSA == RHSA && LHSB == RHSB;
-      return ((LHSA == RHSA && LHSB == RHSB) ||
-              (LHSA == RHSB && LHSB == RHSA));
-    }
-  }
+      }
 
-  // Selects can be non-trivially equivalent via inverted conditions and swaps.
-  Value *CondL, *CondR, *TrueL, *TrueR, *FalseL, *FalseR;
-  if (matchSelectWithOptionalNotCond(LHSI, CondL, TrueL, FalseL) &&
-      matchSelectWithOptionalNotCond(RHSI, CondR, TrueR, FalseR)) {
-    // select Cond, T, F <--> select not(Cond), F, T
-    if (CondL == CondR && TrueL == TrueR && FalseL == FalseR)
-      return true;
+      // select Cond, A, B <--> select not(Cond), B, A
+      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
+        return true;
+    }
 
     // If the true/false operands are swapped and the conditions are compares
     // with inverted predicates, the selects are equal:
-    // select (icmp Pred, X, Y), T, F <--> select (icmp InvPred, X, Y), F, T
+    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
+    //
+    // This also handles patterns with a double-negation in the sense of not +
+    // inverse, because we looked through a 'not' in the matching function and
+    // swapped A/B:
+    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
     //
-    // This also handles patterns with a double-negation because we looked
-    // through a 'not' in the matching function and swapped T/F:
-    // select (cmp Pred, X, Y), T, F <--> select (not (cmp InvPred, X, Y)), T, F
-    if (TrueL == FalseR && FalseL == TrueR) {
+    // This intentionally does NOT handle patterns with a double-negation in
+    // the sense of not + not, because doing so could result in values that
+    // compare as equal but hash differently in the min/max/abs cases, for
+    // example:
+    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
+    //        ^ hashes as min                   ^ would not hash as min
+    // In the context of the EarlyCSE pass, however, such cases never reach
+    // this code, as we simplify the double-negation before hashing the second
+    // select (and so still succeed at CSEing them).
+    if (LHSA == RHSB && LHSB == RHSA) {
       CmpInst::Predicate PredL, PredR;
       Value *X, *Y;
       if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
@@ -308,6 +329,15 @@
   return false;
 }
 
+bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
+  // These comparisons are nontrivial, so assert that equality implies
+  // hash equality (DenseMap demands this as an invariant).
+  bool Result = isEqualImpl(LHS, RHS);
+  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
+         DenseMapInfo::getHashValue(LHS) == DenseMapInfo::getHashValue(RHS));
+  return Result;
+}
+
 //===----------------------------------------------------------------------===//
 // CallValue
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/Transforms/EarlyCSE/commute.ll b/llvm/test/Transforms/EarlyCSE/commute.ll
--- a/llvm/test/Transforms/EarlyCSE/commute.ll
+++ b/llvm/test/Transforms/EarlyCSE/commute.ll
@@ -108,14 +108,13 @@
 }
 
 ; Min/max can also have an inverted predicate and select operands.
-; TODO: Ensure we always recognize this (currently depends on hash collision) define i1 @smin_inverted(i8 %a, i8 %b) { ; CHECK-LABEL: @smin_inverted( ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i8 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[CMP2:%.*]] = xor i1 [[CMP1]], true ; CHECK-NEXT: [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]] -; CHECK: ret i1 +; CHECK-NEXT: ret i1 true ; %cmp1 = icmp slt i8 %a, %b %cmp2 = xor i1 %cmp1, -1 @@ -155,13 +154,12 @@ ret i8 %r } -; TODO: Ensure we always recognize this (currently depends on hash collision) define i1 @smax_inverted(i8 %a, i8 %b) { ; CHECK-LABEL: @smax_inverted( ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i8 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[CMP2:%.*]] = xor i1 [[CMP1]], true ; CHECK-NEXT: [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]] -; CHECK: ret i1 +; CHECK-NEXT: ret i1 true ; %cmp1 = icmp sgt i8 %a, %b %cmp2 = xor i1 %cmp1, -1 @@ -203,13 +201,12 @@ ret <2 x i8> %r } -; TODO: Ensure we always recognize this (currently depends on hash collision) define i1 @umin_inverted(i8 %a, i8 %b) { ; CHECK-LABEL: @umin_inverted( ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i8 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[CMP2:%.*]] = xor i1 [[CMP1]], true ; CHECK-NEXT: [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]] -; CHECK: ret i1 +; CHECK-NEXT: ret i1 true ; %cmp1 = icmp ult i8 %a, %b %cmp2 = xor i1 %cmp1, -1 @@ -250,13 +247,12 @@ ret i8 %r } -; TODO: Ensure we always recognize this (currently depends on hash collision) define i1 @umax_inverted(i8 %a, i8 %b) { ; CHECK-LABEL: @umax_inverted( ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i8 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[CMP2:%.*]] = xor i1 [[CMP1]], true ; CHECK-NEXT: [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[B]] -; CHECK: ret i1 +; CHECK-NEXT: ret i1 true ; %cmp1 = icmp ugt i8 %a, %b %cmp2 = xor i1 %cmp1, -1 @@ -302,14 +298,13 @@ ret i8 %r } -; TODO: Ensure we always recognize this (currently depends on hash collision) define i8 @abs_inverted(i8 %a) { ; CHECK-LABEL: @abs_inverted( ; CHECK-NEXT: [[NEG:%.*]] = sub i8 0, [[A:%.*]] ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i8 [[A]], 0 ; CHECK-NEXT: [[CMP2:%.*]] = xor i1 [[CMP1]], true ; CHECK-NEXT: [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]] -; CHECK: ret i8 +; CHECK-NEXT: ret i8 [[M1]] ; %neg = sub i8 0, %a %cmp1 = icmp sgt i8 %a, 0 @@ -337,14 +332,13 @@ ret i8 %r } -; TODO: Ensure we always recognize this (currently depends on hash collision) define i8 @nabs_inverted(i8 %a) { ; CHECK-LABEL: @nabs_inverted( ; CHECK-NEXT: [[NEG:%.*]] = sub i8 0, [[A:%.*]] ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i8 [[A]], 0 ; CHECK-NEXT: [[CMP2:%.*]] = xor i1 [[CMP1]], true ; CHECK-NEXT: [[M1:%.*]] = select i1 [[CMP1]], i8 [[A]], i8 [[NEG]] -; CHECK: ret i8 +; CHECK-NEXT: ret i8 0 ; %neg = sub i8 0, %a %cmp1 = icmp slt i8 %a, 0 @@ -646,3 +640,879 @@ %r = sub i32 %m2, %m1 ret i32 %r } + + +; This test is a reproducer for a bug involving inverted min/max selects +; hashing differently but comparing as equal. It generates several equivalent +; pairs with the intent that eventually the hash table will fill enough for +; the probe paths of an equivalent pair to collide and the hash mismatch +; to be noticed. It also includes a negation of each negation to check for +; the same issue one level deeper. 
+define void @many_min_max(i32* %px, i32* %py, i32* %pout) { + + %x1 = load volatile i32, i32* %px + %y1 = load volatile i32, i32* %py + %cmp1a = icmp slt i32 %x1, %y1 + %cmp1b = xor i1 %cmp1a, -1 + %cmp1c = xor i1 %cmp1b, -1 + %r1a = select i1 %cmp1a, i32 %x1, i32 %y1 + %r1b = select i1 %cmp1b, i32 %y1, i32 %x1 + %r1c = select i1 %cmp1c, i32 %x1, i32 %y1 + store volatile i32 %r1a, i32* %pout + store volatile i32 %r1b, i32* %pout + store volatile i32 %r1c, i32* %pout + + %x2 = load volatile i32, i32* %px + %y2 = load volatile i32, i32* %py + %cmp2a = icmp sgt i32 %x2, %y2 + %cmp2b = xor i1 %cmp2a, -1 + %cmp2c = xor i1 %cmp2b, -1 + %r2a = select i1 %cmp2a, i32 %x2, i32 %y2 + %r2b = select i1 %cmp2b, i32 %y2, i32 %x2 + %r2c = select i1 %cmp2c, i32 %x2, i32 %y2 + store volatile i32 %r2a, i32* %pout + store volatile i32 %r2b, i32* %pout + store volatile i32 %r2c, i32* %pout + + %x3 = load volatile i32, i32* %px + %y3 = load volatile i32, i32* %py + %cmp3a = icmp slt i32 %x3, %y3 + %cmp3b = xor i1 %cmp3a, -1 + %cmp3c = xor i1 %cmp3b, -1 + %r3a = select i1 %cmp3a, i32 %x3, i32 %y3 + %r3b = select i1 %cmp3b, i32 %y3, i32 %x3 + %r3c = select i1 %cmp3c, i32 %x3, i32 %y3 + store volatile i32 %r3a, i32* %pout + store volatile i32 %r3b, i32* %pout + store volatile i32 %r3c, i32* %pout + + %x4 = load volatile i32, i32* %px + %y4 = load volatile i32, i32* %py + %cmp4a = icmp sgt i32 %x4, %y4 + %cmp4b = xor i1 %cmp4a, -1 + %cmp4c = xor i1 %cmp4b, -1 + %r4a = select i1 %cmp4a, i32 %x4, i32 %y4 + %r4b = select i1 %cmp4b, i32 %y4, i32 %x4 + %r4c = select i1 %cmp4c, i32 %x4, i32 %y4 + store volatile i32 %r4a, i32* %pout + store volatile i32 %r4b, i32* %pout + store volatile i32 %r4c, i32* %pout + + %x5 = load volatile i32, i32* %px + %y5 = load volatile i32, i32* %py + %cmp5a = icmp slt i32 %x5, %y5 + %cmp5b = xor i1 %cmp5a, -1 + %cmp5c = xor i1 %cmp5b, -1 + %r5a = select i1 %cmp5a, i32 %x5, i32 %y5 + %r5b = select i1 %cmp5b, i32 %y5, i32 %x5 + %r5c = select i1 %cmp5c, i32 %x5, i32 %y5 + store volatile i32 %r5a, i32* %pout + store volatile i32 %r5b, i32* %pout + store volatile i32 %r5c, i32* %pout + + %x6 = load volatile i32, i32* %px + %y6 = load volatile i32, i32* %py + %cmp6a = icmp sgt i32 %x6, %y6 + %cmp6b = xor i1 %cmp6a, -1 + %cmp6c = xor i1 %cmp6b, -1 + %r6a = select i1 %cmp6a, i32 %x6, i32 %y6 + %r6b = select i1 %cmp6b, i32 %y6, i32 %x6 + %r6c = select i1 %cmp6c, i32 %x6, i32 %y6 + store volatile i32 %r6a, i32* %pout + store volatile i32 %r6b, i32* %pout + store volatile i32 %r6c, i32* %pout + + %x7 = load volatile i32, i32* %px + %y7 = load volatile i32, i32* %py + %cmp7a = icmp slt i32 %x7, %y7 + %cmp7b = xor i1 %cmp7a, -1 + %cmp7c = xor i1 %cmp7b, -1 + %r7a = select i1 %cmp7a, i32 %x7, i32 %y7 + %r7b = select i1 %cmp7b, i32 %y7, i32 %x7 + %r7c = select i1 %cmp7c, i32 %x7, i32 %y7 + store volatile i32 %r7a, i32* %pout + store volatile i32 %r7b, i32* %pout + store volatile i32 %r7c, i32* %pout + + %x8 = load volatile i32, i32* %px + %y8 = load volatile i32, i32* %py + %cmp8a = icmp sgt i32 %x8, %y8 + %cmp8b = xor i1 %cmp8a, -1 + %cmp8c = xor i1 %cmp8b, -1 + %r8a = select i1 %cmp8a, i32 %x8, i32 %y8 + %r8b = select i1 %cmp8b, i32 %y8, i32 %x8 + %r8c = select i1 %cmp8c, i32 %x8, i32 %y8 + store volatile i32 %r8a, i32* %pout + store volatile i32 %r8b, i32* %pout + store volatile i32 %r8c, i32* %pout + + %x9 = load volatile i32, i32* %px + %y9 = load volatile i32, i32* %py + %cmp9a = icmp slt i32 %x9, %y9 + %cmp9b = xor i1 %cmp9a, -1 + %cmp9c = xor i1 %cmp9b, -1 + %r9a = select i1 %cmp9a, i32 %x9, i32 
%y9 + %r9b = select i1 %cmp9b, i32 %y9, i32 %x9 + %r9c = select i1 %cmp9c, i32 %x9, i32 %y9 + store volatile i32 %r9a, i32* %pout + store volatile i32 %r9b, i32* %pout + store volatile i32 %r9c, i32* %pout + + %x10 = load volatile i32, i32* %px + %y10 = load volatile i32, i32* %py + %cmp10a = icmp sgt i32 %x10, %y10 + %cmp10b = xor i1 %cmp10a, -1 + %cmp10c = xor i1 %cmp10b, -1 + %r10a = select i1 %cmp10a, i32 %x10, i32 %y10 + %r10b = select i1 %cmp10b, i32 %y10, i32 %x10 + %r10c = select i1 %cmp10c, i32 %x10, i32 %y10 + store volatile i32 %r10a, i32* %pout + store volatile i32 %r10b, i32* %pout + store volatile i32 %r10c, i32* %pout + + %x11 = load volatile i32, i32* %px + %y11 = load volatile i32, i32* %py + %cmp11a = icmp slt i32 %x11, %y11 + %cmp11b = xor i1 %cmp11a, -1 + %cmp11c = xor i1 %cmp11b, -1 + %r11a = select i1 %cmp11a, i32 %x11, i32 %y11 + %r11b = select i1 %cmp11b, i32 %y11, i32 %x11 + %r11c = select i1 %cmp11c, i32 %x11, i32 %y11 + store volatile i32 %r11a, i32* %pout + store volatile i32 %r11b, i32* %pout + store volatile i32 %r11c, i32* %pout + + %x12 = load volatile i32, i32* %px + %y12 = load volatile i32, i32* %py + %cmp12a = icmp sgt i32 %x12, %y12 + %cmp12b = xor i1 %cmp12a, -1 + %cmp12c = xor i1 %cmp12b, -1 + %r12a = select i1 %cmp12a, i32 %x12, i32 %y12 + %r12b = select i1 %cmp12b, i32 %y12, i32 %x12 + %r12c = select i1 %cmp12c, i32 %x12, i32 %y12 + store volatile i32 %r12a, i32* %pout + store volatile i32 %r12b, i32* %pout + store volatile i32 %r12c, i32* %pout + + %x13 = load volatile i32, i32* %px + %y13 = load volatile i32, i32* %py + %cmp13a = icmp slt i32 %x13, %y13 + %cmp13b = xor i1 %cmp13a, -1 + %cmp13c = xor i1 %cmp13b, -1 + %r13a = select i1 %cmp13a, i32 %x13, i32 %y13 + %r13b = select i1 %cmp13b, i32 %y13, i32 %x13 + %r13c = select i1 %cmp13c, i32 %x13, i32 %y13 + store volatile i32 %r13a, i32* %pout + store volatile i32 %r13b, i32* %pout + store volatile i32 %r13c, i32* %pout + + %x14 = load volatile i32, i32* %px + %y14 = load volatile i32, i32* %py + %cmp14a = icmp sgt i32 %x14, %y14 + %cmp14b = xor i1 %cmp14a, -1 + %cmp14c = xor i1 %cmp14b, -1 + %r14a = select i1 %cmp14a, i32 %x14, i32 %y14 + %r14b = select i1 %cmp14b, i32 %y14, i32 %x14 + %r14c = select i1 %cmp14c, i32 %x14, i32 %y14 + store volatile i32 %r14a, i32* %pout + store volatile i32 %r14b, i32* %pout + store volatile i32 %r14c, i32* %pout + + %x15 = load volatile i32, i32* %px + %y15 = load volatile i32, i32* %py + %cmp15a = icmp slt i32 %x15, %y15 + %cmp15b = xor i1 %cmp15a, -1 + %cmp15c = xor i1 %cmp15b, -1 + %r15a = select i1 %cmp15a, i32 %x15, i32 %y15 + %r15b = select i1 %cmp15b, i32 %y15, i32 %x15 + %r15c = select i1 %cmp15c, i32 %x15, i32 %y15 + store volatile i32 %r15a, i32* %pout + store volatile i32 %r15b, i32* %pout + store volatile i32 %r15c, i32* %pout + + %x16 = load volatile i32, i32* %px + %y16 = load volatile i32, i32* %py + %cmp16a = icmp sgt i32 %x16, %y16 + %cmp16b = xor i1 %cmp16a, -1 + %cmp16c = xor i1 %cmp16b, -1 + %r16a = select i1 %cmp16a, i32 %x16, i32 %y16 + %r16b = select i1 %cmp16b, i32 %y16, i32 %x16 + %r16c = select i1 %cmp16c, i32 %x16, i32 %y16 + store volatile i32 %r16a, i32* %pout + store volatile i32 %r16b, i32* %pout + store volatile i32 %r16c, i32* %pout + + %x17 = load volatile i32, i32* %px + %y17 = load volatile i32, i32* %py + %cmp17a = icmp slt i32 %x17, %y17 + %cmp17b = xor i1 %cmp17a, -1 + %cmp17c = xor i1 %cmp17b, -1 + %r17a = select i1 %cmp17a, i32 %x17, i32 %y17 + %r17b = select i1 %cmp17b, i32 %y17, i32 %x17 + %r17c = select i1 %cmp17c, i32 
%x17, i32 %y17 + store volatile i32 %r17a, i32* %pout + store volatile i32 %r17b, i32* %pout + store volatile i32 %r17c, i32* %pout + + %x18 = load volatile i32, i32* %px + %y18 = load volatile i32, i32* %py + %cmp18a = icmp sgt i32 %x18, %y18 + %cmp18b = xor i1 %cmp18a, -1 + %cmp18c = xor i1 %cmp18b, -1 + %r18a = select i1 %cmp18a, i32 %x18, i32 %y18 + %r18b = select i1 %cmp18b, i32 %y18, i32 %x18 + %r18c = select i1 %cmp18c, i32 %x18, i32 %y18 + store volatile i32 %r18a, i32* %pout + store volatile i32 %r18b, i32* %pout + store volatile i32 %r18c, i32* %pout + + %x19 = load volatile i32, i32* %px + %y19 = load volatile i32, i32* %py + %cmp19a = icmp slt i32 %x19, %y19 + %cmp19b = xor i1 %cmp19a, -1 + %cmp19c = xor i1 %cmp19b, -1 + %r19a = select i1 %cmp19a, i32 %x19, i32 %y19 + %r19b = select i1 %cmp19b, i32 %y19, i32 %x19 + %r19c = select i1 %cmp19c, i32 %x19, i32 %y19 + store volatile i32 %r19a, i32* %pout + store volatile i32 %r19b, i32* %pout + store volatile i32 %r19c, i32* %pout + + %x20 = load volatile i32, i32* %px + %y20 = load volatile i32, i32* %py + %cmp20a = icmp sgt i32 %x20, %y20 + %cmp20b = xor i1 %cmp20a, -1 + %cmp20c = xor i1 %cmp20b, -1 + %r20a = select i1 %cmp20a, i32 %x20, i32 %y20 + %r20b = select i1 %cmp20b, i32 %y20, i32 %x20 + %r20c = select i1 %cmp20c, i32 %x20, i32 %y20 + store volatile i32 %r20a, i32* %pout + store volatile i32 %r20b, i32* %pout + store volatile i32 %r20c, i32* %pout + + %x21 = load volatile i32, i32* %px + %y21 = load volatile i32, i32* %py + %cmp21a = icmp slt i32 %x21, %y21 + %cmp21b = xor i1 %cmp21a, -1 + %cmp21c = xor i1 %cmp21b, -1 + %r21a = select i1 %cmp21a, i32 %x21, i32 %y21 + %r21b = select i1 %cmp21b, i32 %y21, i32 %x21 + %r21c = select i1 %cmp21c, i32 %x21, i32 %y21 + store volatile i32 %r21a, i32* %pout + store volatile i32 %r21b, i32* %pout + store volatile i32 %r21c, i32* %pout + + %x22 = load volatile i32, i32* %px + %y22 = load volatile i32, i32* %py + %cmp22a = icmp sgt i32 %x22, %y22 + %cmp22b = xor i1 %cmp22a, -1 + %cmp22c = xor i1 %cmp22b, -1 + %r22a = select i1 %cmp22a, i32 %x22, i32 %y22 + %r22b = select i1 %cmp22b, i32 %y22, i32 %x22 + %r22c = select i1 %cmp22c, i32 %x22, i32 %y22 + store volatile i32 %r22a, i32* %pout + store volatile i32 %r22b, i32* %pout + store volatile i32 %r22c, i32* %pout + + %x23 = load volatile i32, i32* %px + %y23 = load volatile i32, i32* %py + %cmp23a = icmp slt i32 %x23, %y23 + %cmp23b = xor i1 %cmp23a, -1 + %cmp23c = xor i1 %cmp23b, -1 + %r23a = select i1 %cmp23a, i32 %x23, i32 %y23 + %r23b = select i1 %cmp23b, i32 %y23, i32 %x23 + %r23c = select i1 %cmp23c, i32 %x23, i32 %y23 + store volatile i32 %r23a, i32* %pout + store volatile i32 %r23b, i32* %pout + store volatile i32 %r23c, i32* %pout + + %x24 = load volatile i32, i32* %px + %y24 = load volatile i32, i32* %py + %cmp24a = icmp sgt i32 %x24, %y24 + %cmp24b = xor i1 %cmp24a, -1 + %cmp24c = xor i1 %cmp24b, -1 + %r24a = select i1 %cmp24a, i32 %x24, i32 %y24 + %r24b = select i1 %cmp24b, i32 %y24, i32 %x24 + %r24c = select i1 %cmp24c, i32 %x24, i32 %y24 + store volatile i32 %r24a, i32* %pout + store volatile i32 %r24b, i32* %pout + store volatile i32 %r24c, i32* %pout + + %x25 = load volatile i32, i32* %px + %y25 = load volatile i32, i32* %py + %cmp25a = icmp slt i32 %x25, %y25 + %cmp25b = xor i1 %cmp25a, -1 + %cmp25c = xor i1 %cmp25b, -1 + %r25a = select i1 %cmp25a, i32 %x25, i32 %y25 + %r25b = select i1 %cmp25b, i32 %y25, i32 %x25 + %r25c = select i1 %cmp25c, i32 %x25, i32 %y25 + store volatile i32 %r25a, i32* %pout + store volatile i32 
%r25b, i32* %pout + store volatile i32 %r25c, i32* %pout + + %x26 = load volatile i32, i32* %px + %y26 = load volatile i32, i32* %py + %cmp26a = icmp sgt i32 %x26, %y26 + %cmp26b = xor i1 %cmp26a, -1 + %cmp26c = xor i1 %cmp26b, -1 + %r26a = select i1 %cmp26a, i32 %x26, i32 %y26 + %r26b = select i1 %cmp26b, i32 %y26, i32 %x26 + %r26c = select i1 %cmp26c, i32 %x26, i32 %y26 + store volatile i32 %r26a, i32* %pout + store volatile i32 %r26b, i32* %pout + store volatile i32 %r26c, i32* %pout + + %x27 = load volatile i32, i32* %px + %y27 = load volatile i32, i32* %py + %cmp27a = icmp slt i32 %x27, %y27 + %cmp27b = xor i1 %cmp27a, -1 + %cmp27c = xor i1 %cmp27b, -1 + %r27a = select i1 %cmp27a, i32 %x27, i32 %y27 + %r27b = select i1 %cmp27b, i32 %y27, i32 %x27 + %r27c = select i1 %cmp27c, i32 %x27, i32 %y27 + store volatile i32 %r27a, i32* %pout + store volatile i32 %r27b, i32* %pout + store volatile i32 %r27c, i32* %pout + + %x28 = load volatile i32, i32* %px + %y28 = load volatile i32, i32* %py + %cmp28a = icmp sgt i32 %x28, %y28 + %cmp28b = xor i1 %cmp28a, -1 + %cmp28c = xor i1 %cmp28b, -1 + %r28a = select i1 %cmp28a, i32 %x28, i32 %y28 + %r28b = select i1 %cmp28b, i32 %y28, i32 %x28 + %r28c = select i1 %cmp28c, i32 %x28, i32 %y28 + store volatile i32 %r28a, i32* %pout + store volatile i32 %r28b, i32* %pout + store volatile i32 %r28c, i32* %pout + + %x29 = load volatile i32, i32* %px + %y29 = load volatile i32, i32* %py + %cmp29a = icmp slt i32 %x29, %y29 + %cmp29b = xor i1 %cmp29a, -1 + %cmp29c = xor i1 %cmp29b, -1 + %r29a = select i1 %cmp29a, i32 %x29, i32 %y29 + %r29b = select i1 %cmp29b, i32 %y29, i32 %x29 + %r29c = select i1 %cmp29c, i32 %x29, i32 %y29 + store volatile i32 %r29a, i32* %pout + store volatile i32 %r29b, i32* %pout + store volatile i32 %r29c, i32* %pout + + %x30 = load volatile i32, i32* %px + %y30 = load volatile i32, i32* %py + %cmp30a = icmp sgt i32 %x30, %y30 + %cmp30b = xor i1 %cmp30a, -1 + %cmp30c = xor i1 %cmp30b, -1 + %r30a = select i1 %cmp30a, i32 %x30, i32 %y30 + %r30b = select i1 %cmp30b, i32 %y30, i32 %x30 + %r30c = select i1 %cmp30c, i32 %x30, i32 %y30 + store volatile i32 %r30a, i32* %pout + store volatile i32 %r30b, i32* %pout + store volatile i32 %r30c, i32* %pout + + %x31 = load volatile i32, i32* %px + %y31 = load volatile i32, i32* %py + %cmp31a = icmp slt i32 %x31, %y31 + %cmp31b = xor i1 %cmp31a, -1 + %cmp31c = xor i1 %cmp31b, -1 + %r31a = select i1 %cmp31a, i32 %x31, i32 %y31 + %r31b = select i1 %cmp31b, i32 %y31, i32 %x31 + %r31c = select i1 %cmp31c, i32 %x31, i32 %y31 + store volatile i32 %r31a, i32* %pout + store volatile i32 %r31b, i32* %pout + store volatile i32 %r31c, i32* %pout + + %x32 = load volatile i32, i32* %px + %y32 = load volatile i32, i32* %py + %cmp32a = icmp sgt i32 %x32, %y32 + %cmp32b = xor i1 %cmp32a, -1 + %cmp32c = xor i1 %cmp32b, -1 + %r32a = select i1 %cmp32a, i32 %x32, i32 %y32 + %r32b = select i1 %cmp32b, i32 %y32, i32 %x32 + %r32c = select i1 %cmp32c, i32 %x32, i32 %y32 + store volatile i32 %r32a, i32* %pout + store volatile i32 %r32b, i32* %pout + store volatile i32 %r32c, i32* %pout + + %x33 = load volatile i32, i32* %px + %y33 = load volatile i32, i32* %py + %cmp33a = icmp slt i32 %x33, %y33 + %cmp33b = xor i1 %cmp33a, -1 + %cmp33c = xor i1 %cmp33b, -1 + %r33a = select i1 %cmp33a, i32 %x33, i32 %y33 + %r33b = select i1 %cmp33b, i32 %y33, i32 %x33 + %r33c = select i1 %cmp33c, i32 %x33, i32 %y33 + store volatile i32 %r33a, i32* %pout + store volatile i32 %r33b, i32* %pout + store volatile i32 %r33c, i32* %pout + + %x34 = load 
volatile i32, i32* %px + %y34 = load volatile i32, i32* %py + %cmp34a = icmp sgt i32 %x34, %y34 + %cmp34b = xor i1 %cmp34a, -1 + %cmp34c = xor i1 %cmp34b, -1 + %r34a = select i1 %cmp34a, i32 %x34, i32 %y34 + %r34b = select i1 %cmp34b, i32 %y34, i32 %x34 + %r34c = select i1 %cmp34c, i32 %x34, i32 %y34 + store volatile i32 %r34a, i32* %pout + store volatile i32 %r34b, i32* %pout + store volatile i32 %r34c, i32* %pout + + %x35 = load volatile i32, i32* %px + %y35 = load volatile i32, i32* %py + %cmp35a = icmp slt i32 %x35, %y35 + %cmp35b = xor i1 %cmp35a, -1 + %cmp35c = xor i1 %cmp35b, -1 + %r35a = select i1 %cmp35a, i32 %x35, i32 %y35 + %r35b = select i1 %cmp35b, i32 %y35, i32 %x35 + %r35c = select i1 %cmp35c, i32 %x35, i32 %y35 + store volatile i32 %r35a, i32* %pout + store volatile i32 %r35b, i32* %pout + store volatile i32 %r35c, i32* %pout + + %x36 = load volatile i32, i32* %px + %y36 = load volatile i32, i32* %py + %cmp36a = icmp sgt i32 %x36, %y36 + %cmp36b = xor i1 %cmp36a, -1 + %cmp36c = xor i1 %cmp36b, -1 + %r36a = select i1 %cmp36a, i32 %x36, i32 %y36 + %r36b = select i1 %cmp36b, i32 %y36, i32 %x36 + %r36c = select i1 %cmp36c, i32 %x36, i32 %y36 + store volatile i32 %r36a, i32* %pout + store volatile i32 %r36b, i32* %pout + store volatile i32 %r36c, i32* %pout + + %x37 = load volatile i32, i32* %px + %y37 = load volatile i32, i32* %py + %cmp37a = icmp slt i32 %x37, %y37 + %cmp37b = xor i1 %cmp37a, -1 + %cmp37c = xor i1 %cmp37b, -1 + %r37a = select i1 %cmp37a, i32 %x37, i32 %y37 + %r37b = select i1 %cmp37b, i32 %y37, i32 %x37 + %r37c = select i1 %cmp37c, i32 %x37, i32 %y37 + store volatile i32 %r37a, i32* %pout + store volatile i32 %r37b, i32* %pout + store volatile i32 %r37c, i32* %pout + + %x38 = load volatile i32, i32* %px + %y38 = load volatile i32, i32* %py + %cmp38a = icmp sgt i32 %x38, %y38 + %cmp38b = xor i1 %cmp38a, -1 + %cmp38c = xor i1 %cmp38b, -1 + %r38a = select i1 %cmp38a, i32 %x38, i32 %y38 + %r38b = select i1 %cmp38b, i32 %y38, i32 %x38 + %r38c = select i1 %cmp38c, i32 %x38, i32 %y38 + store volatile i32 %r38a, i32* %pout + store volatile i32 %r38b, i32* %pout + store volatile i32 %r38c, i32* %pout + + %x39 = load volatile i32, i32* %px + %y39 = load volatile i32, i32* %py + %cmp39a = icmp slt i32 %x39, %y39 + %cmp39b = xor i1 %cmp39a, -1 + %cmp39c = xor i1 %cmp39b, -1 + %r39a = select i1 %cmp39a, i32 %x39, i32 %y39 + %r39b = select i1 %cmp39b, i32 %y39, i32 %x39 + %r39c = select i1 %cmp39c, i32 %x39, i32 %y39 + store volatile i32 %r39a, i32* %pout + store volatile i32 %r39b, i32* %pout + store volatile i32 %r39c, i32* %pout + + %x40 = load volatile i32, i32* %px + %y40 = load volatile i32, i32* %py + %cmp40a = icmp sgt i32 %x40, %y40 + %cmp40b = xor i1 %cmp40a, -1 + %cmp40c = xor i1 %cmp40b, -1 + %r40a = select i1 %cmp40a, i32 %x40, i32 %y40 + %r40b = select i1 %cmp40b, i32 %y40, i32 %x40 + %r40c = select i1 %cmp40c, i32 %x40, i32 %y40 + store volatile i32 %r40a, i32* %pout + store volatile i32 %r40b, i32* %pout + store volatile i32 %r40c, i32* %pout + + %x41 = load volatile i32, i32* %px + %y41 = load volatile i32, i32* %py + %cmp41a = icmp slt i32 %x41, %y41 + %cmp41b = xor i1 %cmp41a, -1 + %cmp41c = xor i1 %cmp41b, -1 + %r41a = select i1 %cmp41a, i32 %x41, i32 %y41 + %r41b = select i1 %cmp41b, i32 %y41, i32 %x41 + %r41c = select i1 %cmp41c, i32 %x41, i32 %y41 + store volatile i32 %r41a, i32* %pout + store volatile i32 %r41b, i32* %pout + store volatile i32 %r41c, i32* %pout + + %x42 = load volatile i32, i32* %px + %y42 = load volatile i32, i32* %py + %cmp42a = 
icmp sgt i32 %x42, %y42 + %cmp42b = xor i1 %cmp42a, -1 + %cmp42c = xor i1 %cmp42b, -1 + %r42a = select i1 %cmp42a, i32 %x42, i32 %y42 + %r42b = select i1 %cmp42b, i32 %y42, i32 %x42 + %r42c = select i1 %cmp42c, i32 %x42, i32 %y42 + store volatile i32 %r42a, i32* %pout + store volatile i32 %r42b, i32* %pout + store volatile i32 %r42c, i32* %pout + + %x43 = load volatile i32, i32* %px + %y43 = load volatile i32, i32* %py + %cmp43a = icmp slt i32 %x43, %y43 + %cmp43b = xor i1 %cmp43a, -1 + %cmp43c = xor i1 %cmp43b, -1 + %r43a = select i1 %cmp43a, i32 %x43, i32 %y43 + %r43b = select i1 %cmp43b, i32 %y43, i32 %x43 + %r43c = select i1 %cmp43c, i32 %x43, i32 %y43 + store volatile i32 %r43a, i32* %pout + store volatile i32 %r43b, i32* %pout + store volatile i32 %r43c, i32* %pout + + %x44 = load volatile i32, i32* %px + %y44 = load volatile i32, i32* %py + %cmp44a = icmp sgt i32 %x44, %y44 + %cmp44b = xor i1 %cmp44a, -1 + %cmp44c = xor i1 %cmp44b, -1 + %r44a = select i1 %cmp44a, i32 %x44, i32 %y44 + %r44b = select i1 %cmp44b, i32 %y44, i32 %x44 + %r44c = select i1 %cmp44c, i32 %x44, i32 %y44 + store volatile i32 %r44a, i32* %pout + store volatile i32 %r44b, i32* %pout + store volatile i32 %r44c, i32* %pout + + %x45 = load volatile i32, i32* %px + %y45 = load volatile i32, i32* %py + %cmp45a = icmp slt i32 %x45, %y45 + %cmp45b = xor i1 %cmp45a, -1 + %cmp45c = xor i1 %cmp45b, -1 + %r45a = select i1 %cmp45a, i32 %x45, i32 %y45 + %r45b = select i1 %cmp45b, i32 %y45, i32 %x45 + %r45c = select i1 %cmp45c, i32 %x45, i32 %y45 + store volatile i32 %r45a, i32* %pout + store volatile i32 %r45b, i32* %pout + store volatile i32 %r45c, i32* %pout + + %x46 = load volatile i32, i32* %px + %y46 = load volatile i32, i32* %py + %cmp46a = icmp sgt i32 %x46, %y46 + %cmp46b = xor i1 %cmp46a, -1 + %cmp46c = xor i1 %cmp46b, -1 + %r46a = select i1 %cmp46a, i32 %x46, i32 %y46 + %r46b = select i1 %cmp46b, i32 %y46, i32 %x46 + %r46c = select i1 %cmp46c, i32 %x46, i32 %y46 + store volatile i32 %r46a, i32* %pout + store volatile i32 %r46b, i32* %pout + store volatile i32 %r46c, i32* %pout + + %x47 = load volatile i32, i32* %px + %y47 = load volatile i32, i32* %py + %cmp47a = icmp slt i32 %x47, %y47 + %cmp47b = xor i1 %cmp47a, -1 + %cmp47c = xor i1 %cmp47b, -1 + %r47a = select i1 %cmp47a, i32 %x47, i32 %y47 + %r47b = select i1 %cmp47b, i32 %y47, i32 %x47 + %r47c = select i1 %cmp47c, i32 %x47, i32 %y47 + store volatile i32 %r47a, i32* %pout + store volatile i32 %r47b, i32* %pout + store volatile i32 %r47c, i32* %pout + + %x48 = load volatile i32, i32* %px + %y48 = load volatile i32, i32* %py + %cmp48a = icmp sgt i32 %x48, %y48 + %cmp48b = xor i1 %cmp48a, -1 + %cmp48c = xor i1 %cmp48b, -1 + %r48a = select i1 %cmp48a, i32 %x48, i32 %y48 + %r48b = select i1 %cmp48b, i32 %y48, i32 %x48 + %r48c = select i1 %cmp48c, i32 %x48, i32 %y48 + store volatile i32 %r48a, i32* %pout + store volatile i32 %r48b, i32* %pout + store volatile i32 %r48c, i32* %pout + + %x49 = load volatile i32, i32* %px + %y49 = load volatile i32, i32* %py + %cmp49a = icmp slt i32 %x49, %y49 + %cmp49b = xor i1 %cmp49a, -1 + %cmp49c = xor i1 %cmp49b, -1 + %r49a = select i1 %cmp49a, i32 %x49, i32 %y49 + %r49b = select i1 %cmp49b, i32 %y49, i32 %x49 + %r49c = select i1 %cmp49c, i32 %x49, i32 %y49 + store volatile i32 %r49a, i32* %pout + store volatile i32 %r49b, i32* %pout + store volatile i32 %r49c, i32* %pout + + %x50 = load volatile i32, i32* %px + %y50 = load volatile i32, i32* %py + %cmp50a = icmp sgt i32 %x50, %y50 + %cmp50b = xor i1 %cmp50a, -1 + %cmp50c = xor i1 
%cmp50b, -1 + %r50a = select i1 %cmp50a, i32 %x50, i32 %y50 + %r50b = select i1 %cmp50b, i32 %y50, i32 %x50 + %r50c = select i1 %cmp50c, i32 %x50, i32 %y50 + store volatile i32 %r50a, i32* %pout + store volatile i32 %r50b, i32* %pout + store volatile i32 %r50c, i32* %pout + + %x51 = load volatile i32, i32* %px + %y51 = load volatile i32, i32* %py + %cmp51a = icmp slt i32 %x51, %y51 + %cmp51b = xor i1 %cmp51a, -1 + %cmp51c = xor i1 %cmp51b, -1 + %r51a = select i1 %cmp51a, i32 %x51, i32 %y51 + %r51b = select i1 %cmp51b, i32 %y51, i32 %x51 + %r51c = select i1 %cmp51c, i32 %x51, i32 %y51 + store volatile i32 %r51a, i32* %pout + store volatile i32 %r51b, i32* %pout + store volatile i32 %r51c, i32* %pout + + %x52 = load volatile i32, i32* %px + %y52 = load volatile i32, i32* %py + %cmp52a = icmp sgt i32 %x52, %y52 + %cmp52b = xor i1 %cmp52a, -1 + %cmp52c = xor i1 %cmp52b, -1 + %r52a = select i1 %cmp52a, i32 %x52, i32 %y52 + %r52b = select i1 %cmp52b, i32 %y52, i32 %x52 + %r52c = select i1 %cmp52c, i32 %x52, i32 %y52 + store volatile i32 %r52a, i32* %pout + store volatile i32 %r52b, i32* %pout + store volatile i32 %r52c, i32* %pout + + %x53 = load volatile i32, i32* %px + %y53 = load volatile i32, i32* %py + %cmp53a = icmp slt i32 %x53, %y53 + %cmp53b = xor i1 %cmp53a, -1 + %cmp53c = xor i1 %cmp53b, -1 + %r53a = select i1 %cmp53a, i32 %x53, i32 %y53 + %r53b = select i1 %cmp53b, i32 %y53, i32 %x53 + %r53c = select i1 %cmp53c, i32 %x53, i32 %y53 + store volatile i32 %r53a, i32* %pout + store volatile i32 %r53b, i32* %pout + store volatile i32 %r53c, i32* %pout + + %x54 = load volatile i32, i32* %px + %y54 = load volatile i32, i32* %py + %cmp54a = icmp sgt i32 %x54, %y54 + %cmp54b = xor i1 %cmp54a, -1 + %cmp54c = xor i1 %cmp54b, -1 + %r54a = select i1 %cmp54a, i32 %x54, i32 %y54 + %r54b = select i1 %cmp54b, i32 %y54, i32 %x54 + %r54c = select i1 %cmp54c, i32 %x54, i32 %y54 + store volatile i32 %r54a, i32* %pout + store volatile i32 %r54b, i32* %pout + store volatile i32 %r54c, i32* %pout + + %x55 = load volatile i32, i32* %px + %y55 = load volatile i32, i32* %py + %cmp55a = icmp slt i32 %x55, %y55 + %cmp55b = xor i1 %cmp55a, -1 + %cmp55c = xor i1 %cmp55b, -1 + %r55a = select i1 %cmp55a, i32 %x55, i32 %y55 + %r55b = select i1 %cmp55b, i32 %y55, i32 %x55 + %r55c = select i1 %cmp55c, i32 %x55, i32 %y55 + store volatile i32 %r55a, i32* %pout + store volatile i32 %r55b, i32* %pout + store volatile i32 %r55c, i32* %pout + + %x56 = load volatile i32, i32* %px + %y56 = load volatile i32, i32* %py + %cmp56a = icmp sgt i32 %x56, %y56 + %cmp56b = xor i1 %cmp56a, -1 + %cmp56c = xor i1 %cmp56b, -1 + %r56a = select i1 %cmp56a, i32 %x56, i32 %y56 + %r56b = select i1 %cmp56b, i32 %y56, i32 %x56 + %r56c = select i1 %cmp56c, i32 %x56, i32 %y56 + store volatile i32 %r56a, i32* %pout + store volatile i32 %r56b, i32* %pout + store volatile i32 %r56c, i32* %pout + + %x57 = load volatile i32, i32* %px + %y57 = load volatile i32, i32* %py + %cmp57a = icmp slt i32 %x57, %y57 + %cmp57b = xor i1 %cmp57a, -1 + %cmp57c = xor i1 %cmp57b, -1 + %r57a = select i1 %cmp57a, i32 %x57, i32 %y57 + %r57b = select i1 %cmp57b, i32 %y57, i32 %x57 + %r57c = select i1 %cmp57c, i32 %x57, i32 %y57 + store volatile i32 %r57a, i32* %pout + store volatile i32 %r57b, i32* %pout + store volatile i32 %r57c, i32* %pout + + %x58 = load volatile i32, i32* %px + %y58 = load volatile i32, i32* %py + %cmp58a = icmp sgt i32 %x58, %y58 + %cmp58b = xor i1 %cmp58a, -1 + %cmp58c = xor i1 %cmp58b, -1 + %r58a = select i1 %cmp58a, i32 %x58, i32 %y58 + %r58b = 
select i1 %cmp58b, i32 %y58, i32 %x58 + %r58c = select i1 %cmp58c, i32 %x58, i32 %y58 + store volatile i32 %r58a, i32* %pout + store volatile i32 %r58b, i32* %pout + store volatile i32 %r58c, i32* %pout + + %x59 = load volatile i32, i32* %px + %y59 = load volatile i32, i32* %py + %cmp59a = icmp slt i32 %x59, %y59 + %cmp59b = xor i1 %cmp59a, -1 + %cmp59c = xor i1 %cmp59b, -1 + %r59a = select i1 %cmp59a, i32 %x59, i32 %y59 + %r59b = select i1 %cmp59b, i32 %y59, i32 %x59 + %r59c = select i1 %cmp59c, i32 %x59, i32 %y59 + store volatile i32 %r59a, i32* %pout + store volatile i32 %r59b, i32* %pout + store volatile i32 %r59c, i32* %pout + + %x60 = load volatile i32, i32* %px + %y60 = load volatile i32, i32* %py + %cmp60a = icmp sgt i32 %x60, %y60 + %cmp60b = xor i1 %cmp60a, -1 + %cmp60c = xor i1 %cmp60b, -1 + %r60a = select i1 %cmp60a, i32 %x60, i32 %y60 + %r60b = select i1 %cmp60b, i32 %y60, i32 %x60 + %r60c = select i1 %cmp60c, i32 %x60, i32 %y60 + store volatile i32 %r60a, i32* %pout + store volatile i32 %r60b, i32* %pout + store volatile i32 %r60c, i32* %pout + + %x61 = load volatile i32, i32* %px + %y61 = load volatile i32, i32* %py + %cmp61a = icmp slt i32 %x61, %y61 + %cmp61b = xor i1 %cmp61a, -1 + %cmp61c = xor i1 %cmp61b, -1 + %r61a = select i1 %cmp61a, i32 %x61, i32 %y61 + %r61b = select i1 %cmp61b, i32 %y61, i32 %x61 + %r61c = select i1 %cmp61c, i32 %x61, i32 %y61 + store volatile i32 %r61a, i32* %pout + store volatile i32 %r61b, i32* %pout + store volatile i32 %r61c, i32* %pout + + %x62 = load volatile i32, i32* %px + %y62 = load volatile i32, i32* %py + %cmp62a = icmp sgt i32 %x62, %y62 + %cmp62b = xor i1 %cmp62a, -1 + %cmp62c = xor i1 %cmp62b, -1 + %r62a = select i1 %cmp62a, i32 %x62, i32 %y62 + %r62b = select i1 %cmp62b, i32 %y62, i32 %x62 + %r62c = select i1 %cmp62c, i32 %x62, i32 %y62 + store volatile i32 %r62a, i32* %pout + store volatile i32 %r62b, i32* %pout + store volatile i32 %r62c, i32* %pout + + %x63 = load volatile i32, i32* %px + %y63 = load volatile i32, i32* %py + %cmp63a = icmp slt i32 %x63, %y63 + %cmp63b = xor i1 %cmp63a, -1 + %cmp63c = xor i1 %cmp63b, -1 + %r63a = select i1 %cmp63a, i32 %x63, i32 %y63 + %r63b = select i1 %cmp63b, i32 %y63, i32 %x63 + %r63c = select i1 %cmp63c, i32 %x63, i32 %y63 + store volatile i32 %r63a, i32* %pout + store volatile i32 %r63b, i32* %pout + store volatile i32 %r63c, i32* %pout + + %x64 = load volatile i32, i32* %px + %y64 = load volatile i32, i32* %py + %cmp64a = icmp sgt i32 %x64, %y64 + %cmp64b = xor i1 %cmp64a, -1 + %cmp64c = xor i1 %cmp64b, -1 + %r64a = select i1 %cmp64a, i32 %x64, i32 %y64 + %r64b = select i1 %cmp64b, i32 %y64, i32 %x64 + %r64c = select i1 %cmp64c, i32 %x64, i32 %y64 + store volatile i32 %r64a, i32* %pout + store volatile i32 %r64b, i32* %pout + store volatile i32 %r64c, i32* %pout + + %x65 = load volatile i32, i32* %px + %y65 = load volatile i32, i32* %py + %cmp65a = icmp slt i32 %x65, %y65 + %cmp65b = xor i1 %cmp65a, -1 + %cmp65c = xor i1 %cmp65b, -1 + %r65a = select i1 %cmp65a, i32 %x65, i32 %y65 + %r65b = select i1 %cmp65b, i32 %y65, i32 %x65 + %r65c = select i1 %cmp65c, i32 %x65, i32 %y65 + store volatile i32 %r65a, i32* %pout + store volatile i32 %r65b, i32* %pout + store volatile i32 %r65c, i32* %pout + + %x66 = load volatile i32, i32* %px + %y66 = load volatile i32, i32* %py + %cmp66a = icmp sgt i32 %x66, %y66 + %cmp66b = xor i1 %cmp66a, -1 + %cmp66c = xor i1 %cmp66b, -1 + %r66a = select i1 %cmp66a, i32 %x66, i32 %y66 + %r66b = select i1 %cmp66b, i32 %y66, i32 %x66 + %r66c = select i1 %cmp66c, i32 
%x66, i32 %y66 + store volatile i32 %r66a, i32* %pout + store volatile i32 %r66b, i32* %pout + store volatile i32 %r66c, i32* %pout + + %x67 = load volatile i32, i32* %px + %y67 = load volatile i32, i32* %py + %cmp67a = icmp slt i32 %x67, %y67 + %cmp67b = xor i1 %cmp67a, -1 + %cmp67c = xor i1 %cmp67b, -1 + %r67a = select i1 %cmp67a, i32 %x67, i32 %y67 + %r67b = select i1 %cmp67b, i32 %y67, i32 %x67 + %r67c = select i1 %cmp67c, i32 %x67, i32 %y67 + store volatile i32 %r67a, i32* %pout + store volatile i32 %r67b, i32* %pout + store volatile i32 %r67c, i32* %pout + + %x68 = load volatile i32, i32* %px + %y68 = load volatile i32, i32* %py + %cmp68a = icmp sgt i32 %x68, %y68 + %cmp68b = xor i1 %cmp68a, -1 + %cmp68c = xor i1 %cmp68b, -1 + %r68a = select i1 %cmp68a, i32 %x68, i32 %y68 + %r68b = select i1 %cmp68b, i32 %y68, i32 %x68 + %r68c = select i1 %cmp68c, i32 %x68, i32 %y68 + store volatile i32 %r68a, i32* %pout + store volatile i32 %r68b, i32* %pout + store volatile i32 %r68c, i32* %pout + + %x69 = load volatile i32, i32* %px + %y69 = load volatile i32, i32* %py + %cmp69a = icmp slt i32 %x69, %y69 + %cmp69b = xor i1 %cmp69a, -1 + %cmp69c = xor i1 %cmp69b, -1 + %r69a = select i1 %cmp69a, i32 %x69, i32 %y69 + %r69b = select i1 %cmp69b, i32 %y69, i32 %x69 + %r69c = select i1 %cmp69c, i32 %x69, i32 %y69 + store volatile i32 %r69a, i32* %pout + store volatile i32 %r69b, i32* %pout + store volatile i32 %r69c, i32* %pout + + %x70 = load volatile i32, i32* %px + %y70 = load volatile i32, i32* %py + %cmp70a = icmp sgt i32 %x70, %y70 + %cmp70b = xor i1 %cmp70a, -1 + %cmp70c = xor i1 %cmp70b, -1 + %r70a = select i1 %cmp70a, i32 %x70, i32 %y70 + %r70b = select i1 %cmp70b, i32 %y70, i32 %x70 + %r70c = select i1 %cmp70c, i32 %x70, i32 %y70 + store volatile i32 %r70a, i32* %pout + store volatile i32 %r70b, i32* %pout + store volatile i32 %r70c, i32* %pout + + %x71 = load volatile i32, i32* %px + %y71 = load volatile i32, i32* %py + %cmp71a = icmp slt i32 %x71, %y71 + %cmp71b = xor i1 %cmp71a, -1 + %cmp71c = xor i1 %cmp71b, -1 + %r71a = select i1 %cmp71a, i32 %x71, i32 %y71 + %r71b = select i1 %cmp71b, i32 %y71, i32 %x71 + %r71c = select i1 %cmp71c, i32 %x71, i32 %y71 + store volatile i32 %r71a, i32* %pout + store volatile i32 %r71b, i32* %pout + store volatile i32 %r71c, i32* %pout + + %x72 = load volatile i32, i32* %px + %y72 = load volatile i32, i32* %py + %cmp72a = icmp sgt i32 %x72, %y72 + %cmp72b = xor i1 %cmp72a, -1 + %cmp72c = xor i1 %cmp72b, -1 + %r72a = select i1 %cmp72a, i32 %x72, i32 %y72 + %r72b = select i1 %cmp72b, i32 %y72, i32 %x72 + %r72c = select i1 %cmp72c, i32 %x72, i32 %y72 + store volatile i32 %r72a, i32* %pout + store volatile i32 %r72b, i32* %pout + store volatile i32 %r72c, i32* %pout + + ret void +}