Index: lib/Transforms/Scalar/GVN.cpp =================================================================== --- lib/Transforms/Scalar/GVN.cpp +++ lib/Transforms/Scalar/GVN.cpp @@ -273,20 +273,42 @@ Expression e; e.type = I->getType(); e.opcode = I->getOpcode(); - for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end(); - OI != OE; ++OI) - e.varargs.push_back(lookupOrAdd(*OI)); - if (I->isCommutative()) { - // Ensure that commutative instructions that only differ by a permutation - // of their operands get the same value number by sorting the operand value - // numbers. Since all commutative instructions have two operands it is more - // efficient to sort by hand rather than using, say, std::sort. - assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!"); - if (e.varargs[0] > e.varargs[1]) - std::swap(e.varargs[0], e.varargs[1]); - e.commutative = true; + if (I->isAssociative() && I->isCommutative()) { + // For associative expressions, we want to get the same hash regardless of + // the current association of the expression in the IR. NOTE: This is not + // an actually viable implementation since it's potentially O(I^2) for + // large expression trees. It's purely a proof of concept for the moment. + SmallVector Worklist; + Worklist.push_back(I); + while (!Worklist.empty()) { + Instruction *Cur = Worklist.pop_back_val(); + for (Value *Op : Cur->operands()) { + Instruction *OpI = dyn_cast(Op); + if (OpI && OpI->getOpcode() == I->getOpcode()) + Worklist.push_back(OpI); + else + e.varargs.push_back(lookupOrAdd(Op)); + } + } + std::sort(e.varargs.begin(), e.varargs.end()); + } else { + for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end(); + OI != OE; ++OI) + e.varargs.push_back(lookupOrAdd(*OI)); + if (I->isCommutative()) { + // Ensure that commutative instructions that only differ by a permutation + // of their operands get the same value number by sorting the operand + // value numbers. 
Since all commutative instructions have two operands + // it is more efficient to sort by hand rather than using, say, std::sort. + assert(I->getNumOperands() == 2 && + "Unsupported commutative instruction!"); + if (e.varargs[0] > e.varargs[1]) + std::swap(e.varargs[0], e.varargs[1]); + e.commutative = true; + } } + if (CmpInst *C = dyn_cast(I)) { // Sort the operand value numbers so x<y and y>x get the same value number. CmpInst::Predicate Predicate = C->getPredicate(); Index: lib/Transforms/Scalar/Reassociate.cpp =================================================================== --- lib/Transforms/Scalar/Reassociate.cpp +++ lib/Transforms/Scalar/Reassociate.cpp @@ -388,11 +388,8 @@ /// /// Note that the values Ops[0].first, ..., Ops[N].first are all distinct. /// -/// This routine may modify the function, in which case it returns 'true'. The -/// changes it makes may well be destructive, changing the value computed by 'I' -/// to something completely different. Thus if the routine returns 'true' then -/// you MUST either replace I with a new expression computed from the Ops array, -/// or use RewriteExprTree to put the values back in. +/// This routine may modify the function, in which case it returns 'true'. It +/// does this to increase opportunities for reassociation. /// /// A leaf node is either not a binary operation of the same kind as the root /// node 'I' (i.e. is not a binary operator at all, or is, but with a different @@ -421,29 +418,6 @@ /// then the instruction also belongs to the expression, is not a leaf node of /// it, and its operands also belong to the expression (but may be leaf nodes). /// -/// NOTE: This routine will set operands of non-leaf non-root nodes to undef in -/// order to ensure that every non-root node in the expression has *exactly one* -/// use by a non-leaf node of the expression. 
This destruction means that the -/// caller MUST either replace 'I' with a new expression or use something like -/// RewriteExprTree to put the values back in if the routine indicates that it -/// made a change by returning 'true'. -/// -/// In the above example either the right operand of A or the left operand of B -/// will be replaced by undef. If it is B's operand then this gives: -/// -/// + | I -/// / \ | -/// + + | A, B - operand of B replaced with undef -/// / \ \ | -/// * + * | C, D, E -/// / \ / \ / \ | -/// + * | F, G -/// -/// Note that such undef operands can only be reached by passing through 'I'. -/// For example, if you visit operands recursively starting from a leaf node -/// then you will never see such an undef operand unless you get back to 'I', -/// which requires passing through a phi node. -/// /// Note that this routine may also mutate binary operators of the wrong type /// that have all uses inside the expression (i.e. only used by non-leaf nodes /// of the expression) if it can turn them into binary operators of the right @@ -536,25 +510,6 @@ // Update the number of paths to the leaf. IncorporateWeight(It->second, Weight, Opcode); -#if 0 // TODO: Re-enable once PR13021 is fixed. - // The leaf already has one use from inside the expression. As we want - // exactly one such use, drop this new use of the leaf. - assert(!Op->hasOneUse() && "Only one use, but we got here twice!"); - I->setOperand(OpIdx, UndefValue::get(I->getType())); - Changed = true; - - // If the leaf is a binary operation of the right kind and we now see - // that its multiple original uses were in fact all by nodes belonging - // to the expression, then no longer consider it to be a leaf and add - // its operands to the expression. 
- if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { - LLVM_DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n"); - Worklist.push_back(std::make_pair(BO, It->second)); - Leaves.erase(It); - continue; - } -#endif - // If we still have uses that are not accounted for by the expression // then it is not safe to modify the value. if (!Op->hasOneUse()) @@ -635,6 +590,48 @@ SmallVectorImpl &Ops) { assert(Ops.size() > 1 && "Single values should be used directly!"); + // Check to see if we've actually made a meaningful change to the + // computation. We don't want to perturb the *schedule* of the computation + // by linearizing it if we haven't actually been able to improve the + // computation itself. TODO: At some point, finding a way to do an ILP vs + // RegPressure aware scheduler for when we *have* changed the expression + // would really help. + auto HaveUnchangedOperands = [&]() { + SmallPtrSet Leafs; + for (auto &Op : Ops) + // TODO: handle duplicates + if (!Leafs.insert(Op.Op).second) + return false; + + unsigned Opcode = I->getOpcode(); + SmallVector Worklist; + Worklist.push_back(I); + while(!Worklist.empty()) { + BinaryOperator *Op = Worklist.pop_back_val(); + Value *LHS = Op->getOperand(0); + Value *RHS = Op->getOperand(1); + if (Leafs.count(LHS)) + Leafs.erase(LHS); + else if (auto *LHSBO = isReassociableOp(LHS, Opcode)) + Worklist.push_back(LHSBO); + else + // Unrecognized leaf + return false; + + if (Leafs.count(RHS)) + Leafs.erase(RHS); + else if (auto *RHSBO = isReassociableOp(RHS, Opcode)) + Worklist.push_back(RHSBO); + else + // unrecognized leaf + return false; + } + // If we didn't find any of our leaves, we've modified the original. 
+ return Leafs.empty(); + }; + if (HaveUnchangedOperands()) + return; + // Since our optimizations should never increase the number of operations, the // new expression can usually be written reusing the existing binary operators // from the original expression tree, without creating any new instructions, Index: test/Transforms/Reassociate/2002-05-15-MissedTree.ll =================================================================== --- test/Transforms/Reassociate/2002-05-15-MissedTree.ll +++ test/Transforms/Reassociate/2002-05-15-MissedTree.ll @@ -3,7 +3,7 @@ define i32 @test1(i32 %A, i32 %B) { ; CHECK-LABEL: @test1( -; CHECK-NEXT: [[Z:%.*]] = add i32 [[B:%.*]], [[A:%.*]] +; CHECK-NEXT: [[Z:%.*]] = add i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: ret i32 [[Z]] ; %W = add i32 %B, -5 Index: test/Transforms/Reassociate/2002-05-15-SubReassociate.ll =================================================================== --- test/Transforms/Reassociate/2002-05-15-SubReassociate.ll +++ test/Transforms/Reassociate/2002-05-15-SubReassociate.ll @@ -17,7 +17,7 @@ ; With sub reassociation, constant folding can eliminate the two 12 constants. 
define i32 @test2(i32 %A, i32 %B, i32 %C, i32 %D) { ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[C:%.*]] ; CHECK-NEXT: [[Q:%.*]] = sub i32 [[D:%.*]], [[TMP2]] ; CHECK-NEXT: ret i32 [[Q]] Index: test/Transforms/Reassociate/basictest.ll =================================================================== --- test/Transforms/Reassociate/basictest.ll +++ test/Transforms/Reassociate/basictest.ll @@ -14,7 +14,7 @@ define i32 @test2(i32 %reg109, i32 %reg1111) { ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[REG117:%.*]] = add i32 [[REG1111:%.*]], [[REG109:%.*]] +; CHECK-NEXT: [[REG117:%.*]] = add i32 [[REG109:%.*]], [[REG1111:%.*]] ; CHECK-NEXT: ret i32 [[REG117]] ; %reg115 = add i32 %reg109, -30 @@ -34,7 +34,7 @@ ; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4 ; CHECK-NEXT: [[B:%.*]] = load i32, i32* @b, align 4 ; CHECK-NEXT: [[C:%.*]] = load i32, i32* @c, align 4 -; CHECK-NEXT: [[T1:%.*]] = add i32 [[B]], [[A]] +; CHECK-NEXT: [[T1:%.*]] = add i32 [[A]], [[B]] ; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], [[C]] ; CHECK-NEXT: store i32 [[T2]], i32* @e, align 4 ; CHECK-NEXT: store i32 [[T2]], i32* @f, align 4 @@ -59,7 +59,7 @@ ; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4 ; CHECK-NEXT: [[B:%.*]] = load i32, i32* @b, align 4 ; CHECK-NEXT: [[C:%.*]] = load i32, i32* @c, align 4 -; CHECK-NEXT: [[T1:%.*]] = add i32 [[B]], [[A]] +; CHECK-NEXT: [[T1:%.*]] = add i32 [[A]], [[B]] ; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], [[C]] ; CHECK-NEXT: store i32 [[T2]], i32* @e, align 4 ; CHECK-NEXT: store i32 [[T2]], i32* @f, align 4 @@ -84,7 +84,7 @@ ; CHECK-NEXT: [[A:%.*]] = load i32, i32* @a, align 4 ; CHECK-NEXT: [[B:%.*]] = load i32, i32* @b, align 4 ; CHECK-NEXT: [[C:%.*]] = load i32, i32* @c, align 4 -; CHECK-NEXT: [[T1:%.*]] = add i32 [[B]], [[A]] +; CHECK-NEXT: [[T1:%.*]] = add i32 [[A]], [[B]] ; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 
[[C]] ; CHECK-NEXT: store i32 [[T2]], i32* @e, align 4 ; CHECK-NEXT: store i32 [[T2]], i32* @f, align 4 @@ -129,7 +129,7 @@ define i32 @test7(i32 %A, i32 %B, i32 %C) { ; CHECK-LABEL: @test7( -; CHECK-NEXT: [[REASS_ADD1:%.*]] = add i32 [[C:%.*]], [[B:%.*]] +; CHECK-NEXT: [[REASS_ADD1:%.*]] = add i32 [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[REASS_MUL2:%.*]] = mul i32 [[A:%.*]], [[A]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i32 [[REASS_MUL2]], [[REASS_ADD1]] ; CHECK-NEXT: ret i32 [[REASS_MUL]] @@ -144,8 +144,8 @@ define i32 @test8(i32 %X, i32 %Y, i32 %Z) { ; CHECK-LABEL: @test8( -; CHECK-NEXT: [[A:%.*]] = mul i32 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[C:%.*]] = sub i32 [[Z:%.*]], [[A]] +; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[C:%.*]] = sub i32 [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[C]] ; %A = sub i32 0, %X @@ -281,9 +281,10 @@ define i32 @test17(i32 %X1, i32 %X2, i32 %X3, i32 %X4) { ; CHECK-LABEL: @test17( -; CHECK-NEXT: [[A:%.*]] = mul i32 [[X4:%.*]], [[X3:%.*]] -; CHECK-NEXT: [[C:%.*]] = mul i32 [[A]], [[X1:%.*]] -; CHECK-NEXT: [[D:%.*]] = mul i32 [[A]], [[X2:%.*]] +; CHECK-NEXT: [[A:%.*]] = mul i32 [[X1:%.*]], [[X3:%.*]] +; CHECK-NEXT: [[B:%.*]] = mul i32 [[X2:%.*]], [[X3]] +; CHECK-NEXT: [[C:%.*]] = mul i32 [[A]], [[X4:%.*]] +; CHECK-NEXT: [[D:%.*]] = mul i32 [[B]], [[X4]] ; CHECK-NEXT: [[E:%.*]] = xor i32 [[C]], [[D]] ; CHECK-NEXT: ret i32 [[E]] ; Index: test/Transforms/Reassociate/canonicalize-neg-const.ll =================================================================== --- test/Transforms/Reassociate/canonicalize-neg-const.ll +++ test/Transforms/Reassociate/canonicalize-neg-const.ll @@ -173,7 +173,7 @@ ; CHECK-NEXT: [[SUB:%.*]] = fsub fast double 1.000000e+00, [[A:%.*]] ; CHECK-NEXT: [[POW2:%.*]] = fmul double [[A]], [[A]] ; CHECK-NEXT: [[MUL5_NEG:%.*]] = fmul fast double [[POW2]], -5.000000e-01 -; CHECK-NEXT: [[SUB1:%.*]] = fadd fast double [[MUL5_NEG]], [[SUB]] +; CHECK-NEXT: [[SUB1:%.*]] = fadd fast double [[SUB]], 
[[MUL5_NEG]] ; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast double [[SUB1]], 2.000000e+00 ; CHECK-NEXT: ret double [[FACTOR]] ; Index: test/Transforms/Reassociate/commute.ll =================================================================== --- test/Transforms/Reassociate/commute.ll +++ test/Transforms/Reassociate/commute.ll @@ -5,8 +5,8 @@ define void @test1(i32 %x, i32 %y) { ; CHECK-LABEL: @test1( -; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[Y]], [[X]] +; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[X]], [[Y]] ; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: call void @use(i32 [[TMP1]]) ; CHECK-NEXT: call void @use(i32 [[TMP3]]) Index: test/Transforms/Reassociate/factorize-again.ll =================================================================== --- test/Transforms/Reassociate/factorize-again.ll +++ test/Transforms/Reassociate/factorize-again.ll @@ -7,8 +7,8 @@ ; CHECK-NEXT: [[TMP2:%.*]] = fsub float undef, [[TMP0:%.*]] ; CHECK-NEXT: [[TMP3:%.*]] = fsub float undef, [[TMP1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.rsqrt.f32(float undef) -; CHECK-NEXT: [[REASS_ADD2:%.*]] = fadd fast float [[TMP3]], [[TMP2]] -; CHECK-NEXT: [[REASS_MUL3:%.*]] = fmul fast float [[TMP4]], [[REASS_ADD2]] +; CHECK-NEXT: [[REASS_ADD2:%.*]] = fadd fast float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[REASS_MUL3:%.*]] = fmul fast float [[REASS_ADD2]], [[TMP4]] ; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast float [[REASS_MUL3]], [[TMP4]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_ADD1]], undef ; CHECK-NEXT: [[TMP5:%.*]] = call float @foo2(float [[REASS_MUL]], float 0.000000e+00) Index: test/Transforms/Reassociate/fast-ReassociateVector.ll =================================================================== --- test/Transforms/Reassociate/fast-ReassociateVector.ll +++ test/Transforms/Reassociate/fast-ReassociateVector.ll @@ -5,7 +5,7 @@ define <4 x float> 
@test1(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; CHECK-LABEL: @test1( -; CHECK-NEXT: [[REASS_ADD:%.*]] = fadd fast <4 x float> [[B:%.*]], [[A:%.*]] +; CHECK-NEXT: [[REASS_ADD:%.*]] = fadd fast <4 x float> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast <4 x float> [[REASS_ADD]], [[C:%.*]] ; CHECK-NEXT: ret <4 x float> [[REASS_MUL]] ; @@ -34,7 +34,7 @@ define <2 x float> @test2(<2 x float> %a, <2 x float> %b, <2 x float> %c) { ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast <2 x float> [[C:%.*]], [[B:%.*]] +; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast <2 x float> [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[REASS_MUL2:%.*]] = fmul fast <2 x float> [[A:%.*]], [[A]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast <2 x float> [[REASS_MUL2]], [[REASS_ADD1]] ; CHECK-NEXT: ret <2 x float> [[REASS_MUL]] @@ -70,9 +70,9 @@ define <2 x double> @test3(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) { ; CHECK-LABEL: @test3( -; CHECK-NEXT: [[REASS_ADD:%.*]] = fadd fast <2 x double> [[C:%.*]], [[B:%.*]] -; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast <2 x double> [[REASS_ADD]], [[A:%.*]] -; CHECK-NEXT: [[T3:%.*]] = fadd fast <2 x double> [[REASS_MUL]], [[D:%.*]] +; CHECK-NEXT: [[REASS_ADD:%.*]] = fadd fast <2 x double> [[B:%.*]], [[C:%.*]] +; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast <2 x double> [[A:%.*]], [[REASS_ADD]] +; CHECK-NEXT: [[T3:%.*]] = fadd fast <2 x double> [[D:%.*]], [[REASS_MUL]] ; CHECK-NEXT: ret <2 x double> [[T3]] ; %t0 = fmul fast <2 x double> %a, %b @@ -343,8 +343,8 @@ define <2 x double> @test11(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: @test11( -; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast <2 x double> [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast <2 x double> [[FACTOR]], +; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast <2 x double> [[X:%.*]], +; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast <2 x double> [[FACTOR]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x double> [[REASS_MUL]] ; %1 = fmul fast <2 x 
double> %x, %y @@ -372,7 +372,7 @@ define <2 x i64> @test12(<2 x i64> %b, <2 x i64> %c) { ; CHECK-LABEL: @test12( -; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i64> [[C:%.*]], [[B:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i64> [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i64> [[MUL]], ; CHECK-NEXT: ret <2 x i64> [[SHL]] ; @@ -388,7 +388,7 @@ define <4 x float> @test13(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: @test13( ; CHECK-NEXT: [[MUL:%.*]] = fmul fast <4 x float> [[B:%.*]], -; CHECK-NEXT: [[ADD:%.*]] = fadd fast <4 x float> [[MUL]], [[A:%.*]] +; CHECK-NEXT: [[ADD:%.*]] = fadd fast <4 x float> [[A:%.*]], [[MUL]] ; CHECK-NEXT: ret <4 x float> [[ADD]] ; %mul = fmul fast <4 x float> , %b @@ -401,7 +401,7 @@ define <2 x i64> @test14(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) { ; CHECK-LABEL: @test14( -; CHECK-NEXT: [[ADD:%.*]] = add <2 x i64> [[B:%.*]], [[A:%.*]] +; CHECK-NEXT: [[ADD:%.*]] = add <2 x i64> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[C_NEG:%.*]] = sub <2 x i64> zeroinitializer, [[C:%.*]] ; CHECK-NEXT: [[SUB:%.*]] = add <2 x i64> [[ADD]], [[C_NEG]] ; CHECK-NEXT: ret <2 x i64> [[SUB]] @@ -413,7 +413,7 @@ define <2 x i32> @test15(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @test15( -; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i32> [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[TMP3]] ; %tmp1 = and <2 x i32> %x, %y @@ -424,7 +424,7 @@ define <2 x i32> @test16(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @test16( -; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i32> [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[TMP3]] ; %tmp1 = or <2 x i32> %x, %y @@ -445,7 +445,7 @@ define <2 x i32> @test18(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @test18( -; CHECK-NEXT: [[TMP5:%.*]] = xor <2 x i32> [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP5:%.*]] = xor <2 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[TMP5]] ; %tmp1 = xor <2 x 
i32> %x, %y Index: test/Transforms/Reassociate/fast-SubReassociate.ll =================================================================== --- test/Transforms/Reassociate/fast-SubReassociate.ll +++ test/Transforms/Reassociate/fast-SubReassociate.ll @@ -85,8 +85,8 @@ ; %Q = fsub fast float %D, %sum1 ; ret i32 %Q ; CHECK-LABEL: @test4( -; CHECK-NEXT: [[B_NEG:%.*]] = fsub fast float -0.000000e+00, [[B:%.*]] -; CHECK-NEXT: [[O_NEG:%.*]] = fsub fast float [[B_NEG]], [[A:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = fsub fast float -0.000000e+00, [[A:%.*]] +; CHECK-NEXT: [[O_NEG:%.*]] = fsub fast float [[A_NEG]], [[B:%.*]] ; CHECK-NEXT: [[P:%.*]] = fsub fast float [[O_NEG]], [[C:%.*]] ; CHECK-NEXT: [[Q:%.*]] = fadd fast float [[P]], [[D:%.*]] ; CHECK-NEXT: ret float [[Q]] Index: test/Transforms/Reassociate/fast-basictest.ll =================================================================== --- test/Transforms/Reassociate/fast-basictest.ll +++ test/Transforms/Reassociate/fast-basictest.ll @@ -84,7 +84,7 @@ ; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4 ; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4 ; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4 -; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]] +; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[A]], [[B]] ; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]] ; CHECK-NEXT: store float [[T2]], float* @fe, align 4 ; CHECK-NEXT: store float [[T2]], float* @ff, align 4 @@ -109,7 +109,7 @@ ; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4 ; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4 ; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4 -; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]] +; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[A]], [[B]] ; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]] ; CHECK-NEXT: store float [[T2]], float* @fe, align 4 ; CHECK-NEXT: store float [[T2]], float* @ff, align 4 @@ -134,7 +134,7 @@ ; CHECK-NEXT: [[A:%.*]] = load float, float* 
@fa, align 4 ; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4 ; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4 -; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]] +; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[A]], [[B]] ; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]] ; CHECK-NEXT: store float [[T2]], float* @fe, align 4 ; CHECK-NEXT: store float [[T2]], float* @ff, align 4 @@ -156,7 +156,7 @@ define float @test7(float %A, float %B, float %C) { ; CHECK-LABEL: @test7( -; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast float [[C:%.*]], [[B:%.*]] +; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast float [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[REASS_MUL2:%.*]] = fmul fast float [[A:%.*]], [[A]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_MUL2]], [[REASS_ADD1]] ; CHECK-NEXT: ret float [[REASS_MUL]] @@ -190,8 +190,8 @@ define float @test8(float %X, float %Y, float %Z) { ; CHECK-LABEL: @test8( -; CHECK-NEXT: [[A:%.*]] = fmul fast float [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[C:%.*]] = fsub fast float [[Z:%.*]], [[A]] +; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[C:%.*]] = fsub fast float [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[C]] ; %A = fsub fast float 0.0, %X @@ -398,9 +398,9 @@ define float @test13_unary_fneg(float %X1, float %X2, float %X3) { ; CHECK-LABEL: @test13_unary_fneg( -; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[X3:%.*]], [[X2:%.*]] -; CHECK-NEXT: [[D:%.*]] = fmul fast float [[TMP1]], [[X1:%.*]] -; CHECK-NEXT: ret float [[D]] +; CHECK-NEXT: [[REASS_ADD:%.*]] = fsub fast float [[X3:%.*]], [[X2:%.*]] +; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_ADD]], [[X1:%.*]] +; CHECK-NEXT: ret float [[REASS_MUL]] ; %A = fneg fast float %X1 %B = fmul fast float %A, %X2 ; -X1*X2 @@ -543,10 +543,10 @@ define float @test17_unary_fneg(float %a, float %b, float %z) { ; CHECK-LABEL: @test17_unary_fneg( -; CHECK-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04 -; CHECK-NEXT: [[E:%.*]] 
= fmul fast float [[D]], [[B:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[E]], [[Z:%.*]] -; CHECK-NEXT: ret float [[TMP1]] +; CHECK-NEXT: [[E:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04 +; CHECK-NEXT: [[F:%.*]] = fmul fast float [[E]], [[B:%.*]] +; CHECK-NEXT: [[G:%.*]] = fmul fast float [[F]], [[Z:%.*]] +; CHECK-NEXT: ret float [[G]] ; %c = fneg fast float %z %d = fmul fast float %a, %b Index: test/Transforms/Reassociate/fast-fp-commute.ll =================================================================== --- test/Transforms/Reassociate/fast-fp-commute.ll +++ test/Transforms/Reassociate/fast-fp-commute.ll @@ -5,8 +5,8 @@ define void @test1(float %x, float %y) { ; CHECK-LABEL: @test1( -; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[Y]], [[X]] +; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[X]], [[Y]] ; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP1]], [[TMP2]] ; CHECK-NEXT: call void @use(float [[TMP1]]) ; CHECK-NEXT: call void @use(float [[TMP3]]) @@ -22,8 +22,8 @@ define float @test2(float %x, float %y) { ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[Y]], [[X]] +; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[X]], [[Y]] ; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret float [[TMP3]] ; @@ -35,8 +35,8 @@ define float @test3(float %x, float %y) { ; CHECK-LABEL: @test3( -; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast float [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[FACTOR]], 2.000000e+00 +; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast float [[X:%.*]], 2.000000e+00 +; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[FACTOR]], [[Y:%.*]] ; CHECK-NEXT: ret float [[REASS_MUL]] ; %1 = fmul fast float %x, %y Index: 
test/Transforms/Reassociate/fast-multistep.ll =================================================================== --- test/Transforms/Reassociate/fast-multistep.ll +++ test/Transforms/Reassociate/fast-multistep.ll @@ -5,7 +5,7 @@ define float @fmultistep1(float %a, float %b, float %c) { ; CHECK-LABEL: @fmultistep1( -; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast float [[C:%.*]], [[B:%.*]] +; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast float [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[REASS_MUL2:%.*]] = fmul fast float [[A:%.*]], [[A]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_MUL2]], [[REASS_ADD1]] ; CHECK-NEXT: ret float [[REASS_MUL]] @@ -22,9 +22,9 @@ define float @fmultistep2(float %a, float %b, float %c, float %d) { ; CHECK-LABEL: @fmultistep2( -; CHECK-NEXT: [[REASS_ADD:%.*]] = fadd fast float [[C:%.*]], [[B:%.*]] -; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_ADD]], [[A:%.*]] -; CHECK-NEXT: [[T3:%.*]] = fadd fast float [[REASS_MUL]], [[D:%.*]] +; CHECK-NEXT: [[REASS_ADD:%.*]] = fadd fast float [[B:%.*]], [[C:%.*]] +; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[A:%.*]], [[REASS_ADD]] +; CHECK-NEXT: [[T3:%.*]] = fadd fast float [[D:%.*]], [[REASS_MUL]] ; CHECK-NEXT: ret float [[T3]] ; %t0 = fmul fast float %a, %b Index: test/Transforms/Reassociate/looptest.ll =================================================================== --- test/Transforms/Reassociate/looptest.ll +++ test/Transforms/Reassociate/looptest.ll @@ -33,8 +33,8 @@ ; CHECK-NEXT: br i1 [[COND221]], label [[BB5]], label [[BB4:%.*]] ; CHECK: bb4: ; CHECK-NEXT: [[REG117:%.*]] = phi i32 [ [[REG118:%.*]], [[BB4]] ], [ 0, [[BB3]] ] -; CHECK-NEXT: [[REG113:%.*]] = add i32 [[REG116]], [[REG115]] -; CHECK-NEXT: [[REG114:%.*]] = add i32 [[REG113]], [[REG117]] +; CHECK-NEXT: [[REG113:%.*]] = add i32 [[REG115]], [[REG117]] +; CHECK-NEXT: [[REG114:%.*]] = add i32 [[REG116]], [[REG113]] ; CHECK-NEXT: [[CAST227:%.*]] = getelementptr [4 x i8], [4 x i8]* @.LC0, i64 0, i64 0 ; CHECK-NEXT: 
[[TMP0:%.*]] = call i32 (i8*, ...) @printf(i8* [[CAST227]], i32 [[REG114]]) ; CHECK-NEXT: [[REG118]] = add i32 [[REG117]], 1 Index: test/Transforms/Reassociate/matching-binops.ll =================================================================== --- test/Transforms/Reassociate/matching-binops.ll +++ test/Transforms/Reassociate/matching-binops.ll @@ -16,8 +16,8 @@ ; CHECK-LABEL: @and_shl( ; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = and i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = shl i8 %x, %shamt @@ -31,8 +31,8 @@ ; CHECK-LABEL: @or_shl( ; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = or i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = or i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = shl i8 %x, %shamt @@ -46,8 +46,8 @@ ; CHECK-LABEL: @xor_shl( ; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = xor i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = xor i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = shl i8 %x, %shamt @@ -61,8 +61,8 @@ ; CHECK-LABEL: @and_lshr( ; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = and i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = lshr i8 %x, %shamt @@ -76,8 
+76,8 @@ ; CHECK-LABEL: @or_lshr( ; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = or i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = or i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = lshr i8 %x, %shamt @@ -91,8 +91,8 @@ ; CHECK-LABEL: @xor_lshr( ; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = xor i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = xor i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = lshr i8 %x, %shamt @@ -106,8 +106,8 @@ ; CHECK-LABEL: @and_ashr( ; CHECK-NEXT: [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = and i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = ashr i8 %x, %shamt @@ -121,8 +121,8 @@ ; CHECK-LABEL: @or_ashr( ; CHECK-NEXT: [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = or i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = or i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = ashr i8 %x, %shamt @@ -138,8 +138,8 @@ ; CHECK-LABEL: @xor_ashr( ; CHECK-NEXT: [[SX:%.*]] = ashr <2 x i8> [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = ashr <2 x i8> [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = xor <2 x i8> [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = xor <2 x i8> [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = xor <2 x i8> [[Z:%.*]], [[SX]] +; CHECK-NEXT: 
[[R:%.*]] = xor <2 x i8> [[SY]], [[A]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %sx = ashr <2 x i8> %x, %shamt @@ -155,8 +155,8 @@ ; CHECK-LABEL: @or_and_shl( ; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = or i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = shl i8 %x, %shamt @@ -172,8 +172,8 @@ ; CHECK-LABEL: @or_lshr_shl( ; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = or i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = or i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = lshr i8 %x, %shamt @@ -189,8 +189,8 @@ ; CHECK-LABEL: @xor_lshr_multiuse( ; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]] ; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = xor i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = xor i8 [[SY]], [[A]] ; CHECK-NEXT: [[R2:%.*]] = sdiv i8 [[A]], [[R]] ; CHECK-NEXT: ret i8 [[R2]] ; @@ -207,9 +207,9 @@ define i8 @add_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) { ; CHECK-LABEL: @add_lshr( ; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]] -; CHECK-NEXT: [[A:%.*]] = add i8 [[SX]], [[Z:%.*]] +; CHECK-NEXT: [[A:%.*]] = add i8 [[Z:%.*]], [[SX]] ; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]] -; CHECK-NEXT: [[R:%.*]] = add i8 [[A]], [[SY]] +; CHECK-NEXT: [[R:%.*]] = add i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = lshr i8 %x, %shamt @@ -225,8 +225,8 @@ ; CHECK-LABEL: @mul_sub( ; CHECK-NEXT: [[SX:%.*]] = sub i8 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[SY:%.*]] = sub i8 [[Y:%.*]], 
[[M]] -; CHECK-NEXT: [[A:%.*]] = mul nsw i8 [[SX]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = mul nuw i8 [[A]], [[SY]] +; CHECK-NEXT: [[A:%.*]] = mul nsw i8 [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = mul nuw i8 [[SY]], [[A]] ; CHECK-NEXT: ret i8 [[R]] ; %sx = sub i8 %x, %m @@ -239,8 +239,8 @@ define i8 @add_mul(i8 %x, i8 %y, i8 %z, i8 %m) { ; CHECK-LABEL: @add_mul( ; CHECK-NEXT: [[SX:%.*]] = mul nuw i8 [[X:%.*]], 42 -; CHECK-NEXT: [[A:%.*]] = add nuw i8 [[Z:%.*]], [[SX]] -; CHECK-NEXT: [[SY:%.*]] = mul nsw i8 [[M:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = add nuw i8 [[SX]], [[Z:%.*]] +; CHECK-NEXT: [[SY:%.*]] = mul nsw i8 [[Y:%.*]], [[M:%.*]] ; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[A]], [[SY]] ; CHECK-NEXT: ret i8 [[R]] ; @@ -257,9 +257,9 @@ define float @fadd_fmul(float %x, float %y, float %z, float %m) { ; CHECK-LABEL: @fadd_fmul( ; CHECK-NEXT: [[SX:%.*]] = fmul float [[X:%.*]], [[M:%.*]] -; CHECK-NEXT: [[A:%.*]] = fadd fast float [[SX]], [[Z:%.*]] +; CHECK-NEXT: [[A:%.*]] = fadd fast float [[Z:%.*]], [[SX]] ; CHECK-NEXT: [[SY:%.*]] = fmul float [[Y:%.*]], [[M]] -; CHECK-NEXT: [[R:%.*]] = fadd fast float [[A]], [[SY]] +; CHECK-NEXT: [[R:%.*]] = fadd fast float [[SY]], [[A]] ; CHECK-NEXT: ret float [[R]] ; %sx = fmul float %x, %m @@ -273,8 +273,8 @@ ; CHECK-LABEL: @fmul_fdiv( ; CHECK-NEXT: [[SX:%.*]] = fdiv float [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[SY:%.*]] = fdiv float [[Y:%.*]], 4.200000e+01 -; CHECK-NEXT: [[A:%.*]] = fmul fast float [[SY]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = fmul fast float [[A]], [[SX]] +; CHECK-NEXT: [[A:%.*]] = fmul fast float [[Z:%.*]], [[SX]] +; CHECK-NEXT: [[R:%.*]] = fmul fast float [[SY]], [[A]] ; CHECK-NEXT: ret float [[R]] ; %sx = fdiv float %x, %m @@ -296,9 +296,9 @@ ; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[SHL]], metadata !16, metadata !DIExpression()), !dbg !25 ; CHECK-NEXT: [[SHL1:%.*]] = shl i32 [[Y]], [[SHAMT]], !dbg !26 ; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[SHL1]], metadata !17, metadata 
!DIExpression()), !dbg !27 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[Z]], !dbg !28 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[Z]], [[SHL]], !dbg !28 ; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[AND]], metadata !18, metadata !DIExpression()), !dbg !29 -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND]], [[SHL1]], !dbg !30 +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[SHL1]], [[AND]], !dbg !30 ; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[AND2]], metadata !19, metadata !DIExpression()), !dbg !31 ; CHECK-NEXT: ret i32 [[AND2]], !dbg !32 ; Index: test/Transforms/Reassociate/mixed-fast-nonfast-fp.ll =================================================================== --- test/Transforms/Reassociate/mixed-fast-nonfast-fp.ll +++ test/Transforms/Reassociate/mixed-fast-nonfast-fp.ll @@ -6,8 +6,8 @@ ; CHECK-NEXT: [[MUL3:%.*]] = fmul float [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast float [[C:%.*]], 2.000000e+00 ; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast float [[FACTOR]], [[B]] -; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_ADD1]], [[A]] -; CHECK-NEXT: [[ADD3:%.*]] = fadd fast float [[REASS_MUL]], [[MUL3]] +; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[A]], [[REASS_ADD1]] +; CHECK-NEXT: [[ADD3:%.*]] = fadd fast float [[MUL3]], [[REASS_MUL]] ; CHECK-NEXT: ret float [[ADD3]] ; %mul1 = fmul fast float %a, %c @@ -23,10 +23,10 @@ define float @foo_reassoc(float %a,float %b, float %c) { ; CHECK-LABEL: @foo_reassoc( ; CHECK-NEXT: [[MUL1:%.*]] = fmul reassoc float [[A:%.*]], [[C:%.*]] -; CHECK-NEXT: [[MUL2:%.*]] = fmul fast float [[B:%.*]], [[A]] +; CHECK-NEXT: [[MUL2:%.*]] = fmul fast float [[A]], [[B:%.*]] ; CHECK-NEXT: [[MUL3:%.*]] = fmul float [[A]], [[B]] ; CHECK-NEXT: [[MUL4:%.*]] = fmul reassoc float [[A]], [[C]] -; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float [[MUL1]], [[MUL3]] +; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float [[MUL3]], [[MUL1]] ; CHECK-NEXT: [[ADD2:%.*]] = fadd reassoc float [[MUL2]], [[MUL4]] ; CHECK-NEXT: 
[[ADD3:%.*]] = fadd fast float [[ADD1]], [[ADD2]] ; CHECK-NEXT: ret float [[ADD3]] Index: test/Transforms/Reassociate/mulfactor.ll =================================================================== --- test/Transforms/Reassociate/mulfactor.ll +++ test/Transforms/Reassociate/mulfactor.ll @@ -5,9 +5,9 @@ ; CHECK-LABEL: @test1( ; CHECK-NEXT: [[T2:%.*]] = mul i32 [[A:%.*]], [[A]] ; CHECK-NEXT: [[T6:%.*]] = mul i32 [[A]], 2 -; CHECK-NEXT: [[REASS_ADD:%.*]] = add i32 [[T6]], [[B:%.*]] -; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i32 [[REASS_ADD]], [[B]] -; CHECK-NEXT: [[T11:%.*]] = add i32 [[REASS_MUL]], [[T2]] +; CHECK-NEXT: [[REASS_ADD:%.*]] = add i32 [[B:%.*]], [[T6]] +; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i32 [[B]], [[REASS_ADD]] +; CHECK-NEXT: [[T11:%.*]] = add i32 [[T2]], [[REASS_MUL]] ; CHECK-NEXT: ret i32 [[T11]] ; %t2 = mul i32 %a, %a Index: test/Transforms/Reassociate/multistep.ll =================================================================== --- test/Transforms/Reassociate/multistep.ll +++ test/Transforms/Reassociate/multistep.ll @@ -5,7 +5,7 @@ define i64 @multistep1(i64 %a, i64 %b, i64 %c) { ; CHECK-LABEL: @multistep1( -; CHECK-NEXT: [[REASS_ADD1:%.*]] = add i64 [[C:%.*]], [[B:%.*]] +; CHECK-NEXT: [[REASS_ADD1:%.*]] = add i64 [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[REASS_MUL2:%.*]] = mul i64 [[A:%.*]], [[A]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i64 [[REASS_MUL2]], [[REASS_ADD1]] ; CHECK-NEXT: ret i64 [[REASS_MUL]] @@ -22,9 +22,9 @@ define i64 @multistep2(i64 %a, i64 %b, i64 %c, i64 %d) { ; CHECK-LABEL: @multistep2( -; CHECK-NEXT: [[REASS_ADD:%.*]] = add i64 [[C:%.*]], [[B:%.*]] -; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i64 [[REASS_ADD]], [[A:%.*]] -; CHECK-NEXT: [[T3:%.*]] = add i64 [[REASS_MUL]], [[D:%.*]] +; CHECK-NEXT: [[REASS_ADD:%.*]] = add i64 [[B:%.*]], [[C:%.*]] +; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i64 [[A:%.*]], [[REASS_ADD]] +; CHECK-NEXT: [[T3:%.*]] = add i64 [[D:%.*]], [[REASS_MUL]] ; CHECK-NEXT: ret i64 [[T3]] ; %t0 = mul i64 %a, %b Index: 
test/Transforms/Reassociate/no-op.ll =================================================================== --- test/Transforms/Reassociate/no-op.ll +++ test/Transforms/Reassociate/no-op.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: [[A0:%.*]] = add nsw i32 [[A:%.*]], 1 ; CHECK-NEXT: [[M0:%.*]] = mul nsw i32 [[A]], 3 ; CHECK-NEXT: [[A1:%.*]] = add nsw i32 [[A0]], [[B:%.*]] -; CHECK-NEXT: [[M1:%.*]] = mul nsw i32 [[M0]], [[B]] +; CHECK-NEXT: [[M1:%.*]] = mul nsw i32 [[B]], [[M0]] ; CHECK-NEXT: call void @use(i32 [[A1]]) ; CHECK-NEXT: call void @use(i32 [[M1]]) ; CHECK-NEXT: ret void @@ -30,9 +30,9 @@ define void @test2(i32 %a, i32 %b, i32 %c, i32 %d) { ; The initial add doesn't change so should not lose the nsw flag. ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[A0:%.*]] = add nsw i32 [[B:%.*]], [[A:%.*]] -; CHECK-NEXT: [[A1:%.*]] = add i32 [[A0]], [[C:%.*]] -; CHECK-NEXT: [[A2:%.*]] = add i32 [[A1]], [[D:%.*]] +; CHECK-NEXT: [[A0:%.*]] = add nsw i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[A1:%.*]] = add nsw i32 [[A0]], [[D:%.*]] +; CHECK-NEXT: [[A2:%.*]] = add nsw i32 [[C:%.*]], [[A1]] ; CHECK-NEXT: call void @use(i32 [[A2]]) ; CHECK-NEXT: ret void ; Index: test/Transforms/Reassociate/optional-flags.ll =================================================================== --- test/Transforms/Reassociate/optional-flags.ll +++ test/Transforms/Reassociate/optional-flags.ll @@ -7,8 +7,8 @@ define i64 @test0(i64 %a, i64 %b, i64 %c) { ; ; CHECK-LABEL: @test0( -; CHECK-NEXT: [[Y:%.*]] = add i64 [[B:%.*]], [[A:%.*]] -; CHECK-NEXT: [[Z:%.*]] = add i64 [[Y]], [[C:%.*]] +; CHECK-NEXT: [[Y:%.*]] = add nsw i64 [[B:%.*]], [[C:%.*]] +; CHECK-NEXT: [[Z:%.*]] = add i64 [[A:%.*]], [[Y]] ; CHECK-NEXT: ret i64 [[Z]] ; %y = add nsw i64 %c, %b @@ -19,8 +19,8 @@ define i64 @test1(i64 %a, i64 %b, i64 %c) { ; ; CHECK-LABEL: @test1( -; CHECK-NEXT: [[Y:%.*]] = add i64 [[B:%.*]], [[A:%.*]] -; CHECK-NEXT: [[Z:%.*]] = add i64 [[Y]], [[C:%.*]] +; CHECK-NEXT: [[Y:%.*]] = add i64 [[B:%.*]], [[C:%.*]] +; CHECK-NEXT: 
[[Z:%.*]] = add nsw i64 [[A:%.*]], [[Y]] ; CHECK-NEXT: ret i64 [[Z]] ; %y = add i64 %c, %b @@ -32,7 +32,7 @@ define i32 @test2(i32 %x, i32 %y) { ; ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[S:%.*]] = add nsw i32 [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[S:%.*]] = add nsw i32 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret i32 [[S]] ; %s = add nsw i32 %x, %y Index: test/Transforms/Reassociate/pointer-collision-non-determinism.ll =================================================================== --- test/Transforms/Reassociate/pointer-collision-non-determinism.ll +++ test/Transforms/Reassociate/pointer-collision-non-determinism.ll @@ -33,36 +33,36 @@ ; CHECK-NEXT: [[TMP110:%.*]] = fsub fast float 1.000000e+00, [[TMP]] ; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[ARG]], 0x3FE99999A0000000 ; CHECK-NEXT: [[TMP311:%.*]] = fsub fast float 1.000000e+00, [[TMP2]] -; CHECK-NEXT: [[REASS_MUL160:%.*]] = fmul fast float [[TMP110]], [[ARG]] -; CHECK-NEXT: [[TMP4:%.*]] = fmul fast float [[REASS_MUL160]], [[TMP311]] -; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float [[TMP4]], [[ARG]] -; CHECK-NEXT: [[TMP6:%.*]] = fmul fast float [[TMP5]], [[ARG]] -; CHECK-NEXT: [[TMP7:%.*]] = fadd fast float [[TMP6]], [[ARG]] -; CHECK-NEXT: [[TMP8:%.*]] = fmul fast float [[TMP7]], [[ARG]] -; CHECK-NEXT: [[TMP9:%.*]] = fadd fast float [[TMP8]], [[ARG]] -; CHECK-NEXT: [[TMP10:%.*]] = fmul fast float [[TMP9]], [[ARG]] -; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[TMP10]], [[ARG]] -; CHECK-NEXT: [[TMP12:%.*]] = fmul fast float [[TMP11]], [[ARG]] -; CHECK-NEXT: [[TMP13:%.*]] = fadd fast float [[TMP12]], [[ARG]] -; CHECK-NEXT: [[TMP14:%.*]] = fmul fast float [[TMP13]], [[ARG]] -; CHECK-NEXT: [[TMP15:%.*]] = fadd fast float [[TMP14]], [[ARG]] -; CHECK-NEXT: [[TMP16:%.*]] = fmul fast float [[TMP15]], [[ARG]] -; CHECK-NEXT: [[TMP17:%.*]] = fadd fast float [[TMP16]], [[ARG]] -; CHECK-NEXT: [[TMP18:%.*]] = fmul fast float [[TMP17]], [[ARG]] -; CHECK-NEXT: [[TMP19:%.*]] = fadd fast float [[TMP18]], [[ARG]] -; CHECK-NEXT: 
[[TMP20:%.*]] = fmul fast float [[TMP19]], [[ARG]] -; CHECK-NEXT: [[TMP21:%.*]] = fadd fast float [[TMP20]], [[ARG]] -; CHECK-NEXT: [[TMP22:%.*]] = fmul fast float [[TMP21]], [[ARG]] -; CHECK-NEXT: [[TMP23:%.*]] = fadd fast float [[TMP22]], [[ARG]] +; CHECK-NEXT: [[REASS_MUL160:%.*]] = fmul fast float [[ARG]], [[TMP110]] +; CHECK-NEXT: [[TMP4:%.*]] = fmul fast float [[TMP311]], [[REASS_MUL160]] +; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float [[ARG]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = fmul fast float [[ARG]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = fadd fast float [[ARG]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = fmul fast float [[ARG]], [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = fadd fast float [[ARG]], [[TMP8]] +; CHECK-NEXT: [[TMP10:%.*]] = fmul fast float [[ARG]], [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[ARG]], [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = fmul fast float [[ARG]], [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = fadd fast float [[ARG]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = fmul fast float [[ARG]], [[TMP13]] +; CHECK-NEXT: [[TMP15:%.*]] = fadd fast float [[ARG]], [[TMP14]] +; CHECK-NEXT: [[TMP16:%.*]] = fmul fast float [[ARG]], [[TMP15]] +; CHECK-NEXT: [[TMP17:%.*]] = fadd fast float [[ARG]], [[TMP16]] +; CHECK-NEXT: [[TMP18:%.*]] = fmul fast float [[ARG]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = fadd fast float [[ARG]], [[TMP18]] +; CHECK-NEXT: [[TMP20:%.*]] = fmul fast float [[ARG]], [[TMP19]] +; CHECK-NEXT: [[TMP21:%.*]] = fadd fast float [[ARG]], [[TMP20]] +; CHECK-NEXT: [[TMP22:%.*]] = fmul fast float [[ARG]], [[TMP21]] +; CHECK-NEXT: [[TMP23:%.*]] = fadd fast float [[ARG]], [[TMP22]] ; CHECK-NEXT: [[REASS_MUL166:%.*]] = fmul fast float [[ARG]], [[ARG]] ; CHECK-NEXT: [[TMP24:%.*]] = fmul fast float [[REASS_MUL166]], [[TMP23]] -; CHECK-NEXT: [[TMP25:%.*]] = fadd fast float [[TMP24]], [[ARG]] -; CHECK-NEXT: [[TMP26:%.*]] = fmul fast float [[TMP25]], [[ARG]] -; CHECK-NEXT: [[TMP27:%.*]] = fadd fast float [[TMP26]], [[ARG]] +; CHECK-NEXT: 
[[TMP25:%.*]] = fadd fast float [[ARG]], [[TMP24]] +; CHECK-NEXT: [[TMP26:%.*]] = fmul fast float [[ARG]], [[TMP25]] +; CHECK-NEXT: [[TMP27:%.*]] = fadd fast float [[ARG]], [[TMP26]] ; CHECK-NEXT: [[TMP29:%.*]] = fmul fast float [[ARG]], [[ARG]] ; CHECK-NEXT: [[TMP31:%.*]] = fmul fast float [[TMP29]], 0x3FEA2E8B80000000 ; CHECK-NEXT: [[TMP33:%.*]] = fmul fast float [[TMP31]], [[TMP27]] -; CHECK-NEXT: [[TMP34:%.*]] = fadd fast float [[TMP33]], [[ARG]] +; CHECK-NEXT: [[TMP34:%.*]] = fadd fast float [[ARG]], [[TMP33]] ; CHECK-NEXT: ret float [[TMP34]] ; entry: Index: test/Transforms/Reassociate/propagate-flags.ll =================================================================== --- test/Transforms/Reassociate/propagate-flags.ll +++ test/Transforms/Reassociate/propagate-flags.ll @@ -3,7 +3,7 @@ define double @func(double %a, double %b) { ; CHECK-LABEL: @func( -; CHECK-NEXT: [[TMP1:%.*]] = fmul fast double [[B:%.*]], [[A:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = fmul fast double [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[TMP1]], [[TMP1]] ; CHECK-NEXT: ret double [[TMP2]] ; Index: test/Transforms/Reassociate/reassociate_dbgvalue_discard.ll =================================================================== --- test/Transforms/Reassociate/reassociate_dbgvalue_discard.ll +++ test/Transforms/Reassociate/reassociate_dbgvalue_discard.ll @@ -11,13 +11,13 @@ define dso_local i32 @test1(i32 %a, i32 %b, i32 %c, i32 %d) local_unnamed_addr #0 !dbg !7 { ; CHECK-LABEL: @test1( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !16, metadata !DIExpression()), !dbg !20 -; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !17, metadata !DIExpression()), !dbg !21 -; CHECK-NEXT: [[M1:%.*]] = mul i32 [[D:%.*]], [[C:%.*]], !dbg !22 -; CHECK-NEXT: [[M3:%.*]] = mul i32 [[M1]], [[A:%.*]], !dbg !23 -; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[M3]], metadata !18, metadata !DIExpression()), !dbg !24 -; 
CHECK-NEXT: [[M2:%.*]] = mul i32 [[D]], [[C]], !dbg !25 -; CHECK-NEXT: [[M4:%.*]] = mul i32 [[M2]], [[B:%.*]], !dbg !26 +; CHECK-NEXT: [[M1:%.*]] = mul i32 [[A:%.*]], [[C:%.*]], !dbg !20 +; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[M1]], metadata !16, metadata !DIExpression()), !dbg !21 +; CHECK-NEXT: [[M2:%.*]] = mul i32 [[B:%.*]], [[C]], !dbg !22 +; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[M2]], metadata !17, metadata !DIExpression()), !dbg !23 +; CHECK-NEXT: [[M3:%.*]] = mul i32 [[M1]], [[D:%.*]], !dbg !24 +; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[M3]], metadata !18, metadata !DIExpression()), !dbg !25 +; CHECK-NEXT: [[M4:%.*]] = mul i32 [[M2]], [[D]], !dbg !26 ; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[M4]], metadata !19, metadata !DIExpression()), !dbg !27 ; CHECK-NEXT: [[RES:%.*]] = xor i32 [[M3]], [[M4]] ; CHECK-NEXT: ret i32 [[RES]], !dbg !28 Index: test/Transforms/Reassociate/shift-factor.ll =================================================================== --- test/Transforms/Reassociate/shift-factor.ll +++ test/Transforms/Reassociate/shift-factor.ll @@ -5,7 +5,7 @@ define i32 @test1(i32 %X, i32 %Y) { ; CHECK-LABEL: @test1( -; CHECK-NEXT: [[REASS_ADD:%.*]] = add i32 [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[REASS_ADD:%.*]] = add i32 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[REASS_MUL:%.*]] = shl i32 [[REASS_ADD]], 1 ; CHECK-NEXT: ret i32 [[REASS_MUL]] ; Index: test/Transforms/Reassociate/vaarg_movable.ll =================================================================== --- test/Transforms/Reassociate/vaarg_movable.ll +++ test/Transforms/Reassociate/vaarg_movable.ll @@ -15,8 +15,8 @@ ; CHECK-NEXT: [[V0:%.*]] = va_arg i8** [[VARARGS]], i32 ; CHECK-NEXT: [[V1:%.*]] = va_arg i8** [[VARARGS]], i32 ; CHECK-NEXT: [[V0_NEG:%.*]] = sub i32 0, [[V0]] -; CHECK-NEXT: [[SUB:%.*]] = add i32 [[V0_NEG]], 1 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], [[V1]] +; CHECK-NEXT: [[SUB:%.*]] = add i32 [[V1]], [[V0_NEG]] +; CHECK-NEXT: 
[[ADD:%.*]] = add nsw i32 [[SUB]], 1 ; CHECK-NEXT: call void @llvm.va_end(i8* [[VARARGS1]]) ; CHECK-NEXT: ret i32 [[ADD]] ; Index: test/Transforms/Reassociate/wrap-flags.ll =================================================================== --- test/Transforms/Reassociate/wrap-flags.ll +++ test/Transforms/Reassociate/wrap-flags.ll @@ -51,7 +51,7 @@ ; CHECK-LABEL: @pr23926( ; CHECK-NEXT: [[X1_NEG:%.*]] = sub i2 0, [[X1:%.*]] ; CHECK-NEXT: [[ADD_NEG:%.*]] = add i2 [[X1_NEG]], -1 -; CHECK-NEXT: [[SUB:%.*]] = add i2 [[ADD_NEG]], [[X2:%.*]] +; CHECK-NEXT: [[SUB:%.*]] = add i2 [[X2:%.*]], [[ADD_NEG]] ; CHECK-NEXT: ret i2 [[SUB]] ; %add = add nuw i2 %X1, 1 Index: test/Transforms/Reassociate/xor_reassoc.ll =================================================================== --- test/Transforms/Reassociate/xor_reassoc.ll +++ test/Transforms/Reassociate/xor_reassoc.ll @@ -71,7 +71,7 @@ ; CHECK-LABEL: @xor3( ; CHECK-NEXT: [[AND_RA:%.*]] = and i32 [[X:%.*]], -436 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 123 -; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[XOR]], [[AND_RA]] +; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[AND_RA]], [[XOR]] ; CHECK-NEXT: ret i32 [[XOR1]] ; %or = or i32 %x, 123 @@ -87,7 +87,7 @@ ; CHECK-LABEL: @xor3_vec( ; CHECK-NEXT: [[AND_RA:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[XOR1:%.*]] = xor <2 x i32> [[XOR]], [[AND_RA]] +; CHECK-NEXT: [[XOR1:%.*]] = xor <2 x i32> [[AND_RA]], [[XOR]] ; CHECK-NEXT: ret <2 x i32> [[XOR1]] ; %or = or <2 x i32> %x, @@ -102,7 +102,7 @@ ; CHECK-LABEL: @xor4( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], -124 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 435 -; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[XOR]], [[AND]] +; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[AND]], [[XOR]] ; CHECK-NEXT: ret i32 [[XOR1]] ; %and = and i32 %x, -124 @@ -116,7 +116,7 @@ ; CHECK-LABEL: @xor4_vec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i32> [[Y:%.*]], -; 
CHECK-NEXT: [[XOR1:%.*]] = xor <2 x i32> [[XOR]], [[AND]] +; CHECK-NEXT: [[XOR1:%.*]] = xor <2 x i32> [[AND]], [[XOR]] ; CHECK-NEXT: ret <2 x i32> [[XOR1]] ; %and = and <2 x i32> %x, @@ -163,8 +163,8 @@ ; (x | c1) ^ (x & c1) = x ^ c1 define i32 @xor_special2(i32 %x, i32 %y) { ; CHECK-LABEL: @xor_special2( -; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[X:%.*]], 123 -; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[XOR]], [[Y:%.*]] +; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 123 +; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[X:%.*]], [[XOR]] ; CHECK-NEXT: ret i32 [[XOR1]] ; %or = or i32 %x, 123 @@ -178,8 +178,8 @@ ; (x | c1) ^ (x & c1) = x ^ c1 define <2 x i32> @xor_special2_vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @xor_special2_vec( -; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[XOR1:%.*]] = xor <2 x i32> [[XOR]], [[Y:%.*]] +; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i32> [[Y:%.*]], +; CHECK-NEXT: [[XOR1:%.*]] = xor <2 x i32> [[X:%.*]], [[XOR]] ; CHECK-NEXT: ret <2 x i32> [[XOR1]] ; %or = or <2 x i32> %x, @@ -247,7 +247,7 @@ ; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], 123 ; CHECK-NEXT: [[AND_RA:%.*]] = and i32 [[X]], 435 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[AND_RA]], 435 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[XOR]], [[OR]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[OR]], [[XOR]] ; CHECK-NEXT: ret i32 [[ADD]] ; %or = or i32 %x, 123 @@ -266,8 +266,8 @@ ; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], 123 ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X]], 456 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[OR]], [[OR1]] -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[OR1]], [[OR]] -; CHECK-NEXT: [[ADD2:%.*]] = add i32 [[ADD]], [[XOR]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[OR]], [[XOR]] +; CHECK-NEXT: [[ADD2:%.*]] = add i32 [[OR1]], [[ADD]] ; CHECK-NEXT: ret i32 [[ADD2]] ; %or = or i32 %x, 123 @@ -313,12 +313,12 @@ ; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP2:%.*]], 255 ; CHECK-NEXT: [[AND_RA:%.*]] = and i32 [[TMP1]], -360490541 ; CHECK-NEXT: [[TMP9:%.*]] = xor i32 [[TMP5]], 891034567 -; 
CHECK-NEXT: [[TMP10:%.*]] = xor i32 [[TMP9]], [[AND_RA]] -; CHECK-NEXT: [[TMP11:%.*]] = xor i32 [[TMP10]], [[TMP7]] +; CHECK-NEXT: [[TMP10:%.*]] = xor i32 [[AND_RA]], [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = xor i32 [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[TMP12:%.*]] = and i32 [[TMP3:%.*]], 255 ; CHECK-NEXT: [[TMP13:%.*]] = add i32 [[TMP1]], 32 ; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[TMP13]], [[TMP2]] -; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], [[TMP8]] +; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]] ; CHECK-NEXT: [[TMP16:%.*]] = add i32 [[TMP15]], [[TMP11]] ; CHECK-NEXT: ret i32 [[TMP16]] ;