diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -969,15 +969,6 @@
     }
   }
 
-  // If all bits affected by the add are included in a high-bit-mask, do the
-  // add before the mask op:
-  // (X & 0xFF00) + xx00 --> (X + xx00) & 0xFF00
-  if (match(Op0, m_OneUse(m_And(m_Value(X), m_APInt(C2)))) &&
-      C2->isNegative() && C2->isShiftedMask() && *C == (*C & *C2)) {
-    Value *NewAdd = Builder.CreateAdd(X, ConstantInt::get(Ty, *C));
-    return BinaryOperator::CreateAnd(NewAdd, ConstantInt::get(Ty, *C2));
-  }
-
   return nullptr;
 }
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1744,6 +1744,49 @@
   return nullptr;
 }
 
+// Match
+//   (X + C2) | C
+//   (X + C2) ^ C
+//   (X + C2) & C
+// and convert to do the bitwise logic first:
+//   (X | C) + C2
+//   (X ^ C) + C2
+//   (X & C) + C2
+// iff the logic op only affects bits below the lowest bit the add affects
+static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
+                                           InstCombiner::BuilderTy &Builder) {
+  Type *Ty = I.getType();
+  Instruction::BinaryOps OpC = I.getOpcode();
+  Value *Op0 = I.getOperand(0);
+  Value *Op1 = I.getOperand(1);
+  Value *X;
+  const APInt *C, *C2;
+
+  if (!(match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C2)))) &&
+        match(Op1, m_APInt(C))))
+    return nullptr;
+
+  unsigned Width = Ty->getScalarSizeInBits(); // C2->getBitWidth() instead?
+  unsigned LastOneMath = Width - C2->countTrailingZeros();
+
+  switch (OpC) {
+  case Instruction::And:
+    if (C->countLeadingOnes() < LastOneMath)
+      return nullptr;
+    break;
+  case Instruction::Xor:
+  case Instruction::Or:
+    if (C->countLeadingZeros() < LastOneMath)
+      return nullptr;
+    break;
+  default:
+    return nullptr;
+  }
+
+  Value *NewBinOp = Builder.CreateBinOp(OpC, X, ConstantInt::get(Ty, *C));
+  return BinaryOperator::CreateAdd(NewBinOp, ConstantInt::get(Ty, *C2));
+}
+
 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
 // here. We should standardize that construct where it is needed or choose some
 // other way to ensure that commutated variants of patterns are not missed.
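A quick way to sanity-check the guard in canonicalizeLogicFirst is an exhaustive
8-bit test of the constant pairs used by the new tests below (and -10 / add 48,
xor 31 / add 96, or 15 / add 112). The following standalone C++ sketch is
illustrative only, not part of the patch; it verifies that each rewrite is an
identity whenever the guard holds:

#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned v = 0; v < 256; ++v) {
    uint8_t x = static_cast<uint8_t>(v);
    // and: C = -10 (0b11110110), C2 = 48 (0b00110000); leading ones of
    // C (4) >= 8 - cttz(C2) (4), so the guard admits the rewrite.
    bool AndOk = uint8_t((x + 48) & 0xF6) == uint8_t((x & 0xF6) + 48);
    // xor: C = 31, C2 = 96; leading zeros of C (3) >= 8 - cttz(C2) (3).
    bool XorOk = uint8_t((x + 96) ^ 31) == uint8_t((x ^ 31) + 96);
    // or: C = 15, C2 = 112; leading zeros of C (4) >= 8 - cttz(C2) (4).
    bool OrOk = uint8_t((x + 112) | 15) == uint8_t((x | 15) + 112);
    if (!AndOk || !XorOk || !OrOk) {
      std::printf("mismatch at x = %u\n", v);
      return 1;
    }
  }
  std::puts("all 256 i8 inputs agree");
  return 0;
}

The intuition: carries from the add only propagate upward from the lowest set
bit of C2, so a logic op confined to the bits below that point commutes with it.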
@@ -2204,6 +2247,9 @@
   if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
     return replaceInstUsesWith(I, Builder.CreateAnd(Start, Step));
 
+  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
+    return Canonicalized;
+
   return nullptr;
 }
 
@@ -3143,6 +3189,9 @@
         Builder.CreateOr(C, Builder.CreateAnd(A, B)), D);
   }
 
+  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
+    return Canonicalized;
+
   return nullptr;
 }
 
@@ -3880,5 +3929,8 @@
                   m_Value(Y))))
     return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);
 
+  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
+    return Canonicalized;
+
   return nullptr;
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2605,7 +2605,10 @@
   const APInt *BiasCst, *HighBitMaskCst;
   if (!match(XBiasedHighBits,
              m_And(m_Add(m_Specific(X), m_APIntAllowUndef(BiasCst)),
-                   m_APIntAllowUndef(HighBitMaskCst))))
+                   m_APIntAllowUndef(HighBitMaskCst))) &&
+      !match(XBiasedHighBits,
+             m_Add(m_And(m_Specific(X), m_APIntAllowUndef(HighBitMaskCst)),
+                   m_APIntAllowUndef(BiasCst))))
     return nullptr;
 
   if (!LowBitMaskCst->isMask())
diff --git a/llvm/test/Transforms/InstCombine/and-xor-or.ll b/llvm/test/Transforms/InstCombine/and-xor-or.ll
--- a/llvm/test/Transforms/InstCombine/and-xor-or.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-or.ll
@@ -4360,8 +4360,8 @@
 define i32 @canonicalize_logic_first_or0(i32 %x) {
; CHECK-LABEL: define {{[^@]+}}@canonicalize_logic_first_or0
; CHECK-SAME: (i32 [[X:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = add i32 [[X]], 112
-; CHECK-NEXT:    [[R:%.*]] = or i32 [[A]], 15
+; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[X]], 15
+; CHECK-NEXT:    [[R:%.*]] = add i32 [[TMP1]], 112
; CHECK-NEXT:    ret i32 [[R]]
;
   %a = add i32 %x, 112 ; 01110000
@@ -4372,8 +4372,8 @@
 define <2 x i32> @canonicalize_logic_first_or_vector0(<2 x i32> %x) {
; CHECK-LABEL: define {{[^@]+}}@canonicalize_logic_first_or_vector0
; CHECK-SAME: (<2 x i32> [[X:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = add <2 x i32> [[X]],
-; CHECK-NEXT:    [[R:%.*]] = or <2 x i32> [[A]],
+; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[X]],
+; CHECK-NEXT:    [[R:%.*]] = add <2 x i32> [[TMP1]],
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
   %a = add <2 x i32> , %x ; <0xFF800000, 0x7B800000>
@@ -4435,8 +4435,8 @@
 define i8 @canonicalize_logic_first_and0(i8 %x) {
; CHECK-LABEL: define {{[^@]+}}@canonicalize_logic_first_and0
; CHECK-SAME: (i8 [[X:%.*]]) {
-; CHECK-NEXT:    [[B:%.*]] = add i8 [[X]], 48
-; CHECK-NEXT:    [[R:%.*]] = and i8 [[B]], -10
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], -10
+; CHECK-NEXT:    [[R:%.*]] = add i8 [[TMP1]], 48
; CHECK-NEXT:    ret i8 [[R]]
;
   %b = add i8 %x, 48 ; 00110000
@@ -4447,8 +4447,8 @@
 define <2 x i8> @canonicalize_logic_first_and_vector0(<2 x i8> %x) {
; CHECK-LABEL: define {{[^@]+}}@canonicalize_logic_first_and_vector0
; CHECK-SAME: (<2 x i8> [[X:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = add <2 x i8> [[X]],
-; CHECK-NEXT:    [[R:%.*]] = and <2 x i8> [[A]],
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[X]],
+; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[TMP1]],
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
   %a = add <2 x i8> , %x
@@ -4524,8 +4524,8 @@
 define i8 @canonicalize_logic_first_xor_0(i8 %x) {
; CHECK-LABEL: define {{[^@]+}}@canonicalize_logic_first_xor_0
; CHECK-SAME: (i8 [[X:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = add i8 [[X]], 96
-; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], 31
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], 31
+; CHECK-NEXT:    [[R:%.*]] = add i8 [[TMP1]], 96
; CHECK-NEXT:    ret i8 [[R]]
;
   %a = add i8 %x, 96 ; 01100000
@@ -4536,8 +4536,8 @@
 define <2 x i32> @canonicalize_logic_first_xor_vector0(<2 x i32> %x) {
; CHECK-LABEL: define {{[^@]+}}@canonicalize_logic_first_xor_vector0
; CHECK-SAME: (<2 x i32> [[X:%.*]]) {
-; CHECK-NEXT:    [[A:%.*]] = add <2 x i32> [[X]],
-; CHECK-NEXT:    [[R:%.*]] = xor <2 x i32> [[A]],
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[X]],
+; CHECK-NEXT:    [[R:%.*]] = add <2 x i32> [[TMP1]],
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
   %a = add <2 x i32> , %x ; <0xFF800000, 0xFF800000>
diff --git a/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll b/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll
--- a/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll
+++ b/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll
@@ -10,8 +10,8 @@
 ; Basic pattern
 define i8 @t0(i8 %x) {
; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[X_BIASED1:%.*]] = add i8 [[X:%.*]], 15
-; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and i8 [[X_BIASED1]], -16
+; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X:%.*]], 15
+; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and i8 [[X_BIASED]], -16
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
   %x.lowbits = and i8 %x, 15
@@ -25,8 +25,8 @@
 ; Another alignment is fine
 define i8 @t1(i8 %x) {
; CHECK-LABEL: @t1(
-; CHECK-NEXT:    [[X_BIASED1:%.*]] = add i8 [[X:%.*]], 31
-; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and i8 [[X_BIASED1]], -32
+; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X:%.*]], 31
+; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and i8 [[X_BIASED]], -32
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
   %x.lowbits = and i8 %x, 31
@@ -58,8 +58,8 @@
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 15
; CHECK-NEXT:    [[X_LOWBITS_ARE_NOT_ZERO:%.*]] = icmp ne i8 [[X_LOWBITS]], 0
; CHECK-NEXT:    call void @use.i1(i1 [[X_LOWBITS_ARE_NOT_ZERO]])
-; CHECK-NEXT:    [[X_BIASED1:%.*]] = add i8 [[X]], 15
-; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and i8 [[X_BIASED1]], -16
+; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X]], 15
+; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and i8 [[X_BIASED]], -16
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
   %x.lowbits = and i8 %x, 15
@@ -74,8 +74,8 @@
 ; Basic splat vector test
 define <2 x i8> @t4_splat(<2 x i8> %x) {
; CHECK-LABEL: @t4_splat(
-; CHECK-NEXT:    [[X_BIASED1:%.*]] = add <2 x i8> [[X:%.*]],
-; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED1]],
+; CHECK-NEXT:    [[X_BIASED:%.*]] = add <2 x i8> [[X:%.*]],
+; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED]],
; CHECK-NEXT:    ret <2 x i8> [[X_ROUNDEDUP]]
;
   %x.lowbits = and <2 x i8> %x,
@@ -115,8 +115,8 @@
 }
 define <2 x i8> @t5_splat_undef_0b0100(<2 x i8> %x) {
; CHECK-LABEL: @t5_splat_undef_0b0100(
-; CHECK-NEXT:    [[X_BIASED1:%.*]] = add <2 x i8> [[X:%.*]],
-; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED1]],
+; CHECK-NEXT:    [[X_BIASED:%.*]] = add <2 x i8> [[X:%.*]],
+; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED]],
; CHECK-NEXT:    ret <2 x i8> [[X_ROUNDEDUP]]
;
   %x.lowbits = and <2 x i8> %x,
@@ -128,8 +128,8 @@
 }
 define <2 x i8> @t5_splat_undef_0b1000(<2 x i8> %x) {
; CHECK-LABEL: @t5_splat_undef_0b1000(
-; CHECK-NEXT:    [[X_BIASED1:%.*]] = add <2 x i8> [[X:%.*]],
-; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED1]],
+; CHECK-NEXT:    [[X_BIASED:%.*]] = add <2 x i8> [[X:%.*]],
+; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED]],
; CHECK-NEXT:    ret <2 x i8> [[X_ROUNDEDUP]]
;
   %x.lowbits = and <2 x i8> %x,
@@ -247,8 +247,8 @@
; CHECK-LABEL: @n9_wrong_x0(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X_0:%.*]], 15
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X_0]], 16
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X_0]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X_1:%.*]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
@@ -263,8 +263,8 @@
; CHECK-LABEL: @n9_wrong_x1(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X_0:%.*]], 15
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X_1:%.*]], 16
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X_1:%.*]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X_0]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
@@ -279,8 +279,8 @@
; CHECK-LABEL: @n9_wrong_x2(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X_1:%.*]], 15
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X_0:%.*]], 16
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X_0:%.*]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X_0]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
@@ -297,8 +297,8 @@
; CHECK-LABEL: @n10_wrong_low_bit_mask(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 31
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
@@ -333,8 +333,8 @@
; CHECK-LABEL: @n12_wrong_bias(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 15
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X]], 32
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 32
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
@@ -369,8 +369,8 @@
; CHECK-LABEL: @n14_wrong_comparison_constant(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 15
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 1
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
@@ -387,8 +387,8 @@
; CHECK-LABEL: @n15_wrong_comparison_predicate_and_constant(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 14
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
;
@@ -405,8 +405,8 @@
; CHECK-LABEL: @n16_oneuse(
; CHECK-NEXT:    [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 15
; CHECK-NEXT:    [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT:    [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT:    [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT:    call void @use.i8(i8 [[X_BIASED_HIGHBITS]])
; CHECK-NEXT:    [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT:    ret i8 [[X_ROUNDEDUP]]
diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll
--- a/llvm/test/Transforms/InstCombine/or.ll
+++ b/llvm/test/Transforms/InstCombine/or.ll
@@ -154,10 +154,14 @@
   ret i32 %z
 }
 
+; TODO: This should combine to t1 + 2.
 define i32 @test21(i32 %t1) {
; CHECK-LABEL: @test21(
-; CHECK-NEXT:    [[T1_MASK1:%.*]] = add i32 [[T1:%.*]], 2
-; CHECK-NEXT:    ret i32 [[T1_MASK1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[T1:%.*]], -2
+; CHECK-NEXT:    [[T3:%.*]] = add i32 [[TMP1]], 2
+; CHECK-NEXT:    [[T5:%.*]] = and i32 [[T1]], 1
+; CHECK-NEXT:    [[T6:%.*]] = or i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
;
   %t1.mask1 = add i32 %t1, 2
   %t3 = and i32 %t1.mask1, -2
diff --git a/llvm/test/Transforms/InstCombine/sub.ll b/llvm/test/Transforms/InstCombine/sub.ll
--- a/llvm/test/Transforms/InstCombine/sub.ll
+++ b/llvm/test/Transforms/InstCombine/sub.ll
@@ -1490,8 +1490,8 @@
 
 define i8 @sub_mask_lowbits(i8 %x) {
; CHECK-LABEL: @sub_mask_lowbits(
-; CHECK-NEXT:    [[A1:%.*]] = add i8 [[X:%.*]], -108
-; CHECK-NEXT:    [[R:%.*]] = and i8 [[A1]], -4
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -4
+; CHECK-NEXT:    [[R:%.*]] = add i8 [[TMP1]], -108
; CHECK-NEXT:    ret i8 [[R]]
;
   %a1 = add i8 %x, 148 ; 0x94
@@ -1517,10 +1517,10 @@
 
 define <2 x i8> @sub_mask_lowbits_splat_extra_use(<2 x i8> %x, <2 x i8>* %p) {
; CHECK-LABEL: @sub_mask_lowbits_splat_extra_use(
-; CHECK-NEXT:    [[A1:%.*]] = add <2 x i8> [[X:%.*]],
-; CHECK-NEXT:    [[A2:%.*]] = and <2 x i8> [[X]],
+; CHECK-NEXT:    [[A2:%.*]] = and <2 x i8> [[X:%.*]],
; CHECK-NEXT:    store <2 x i8> [[A2]], <2 x i8>* [[P:%.*]], align 2
-; CHECK-NEXT:    [[R:%.*]] = and <2 x i8> [[A1]],
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[X]],
+; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[TMP1]],
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
   %a1 = add <2 x i8> %x, ; 0xc0
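Regarding the new TODO in or.ll's @test21: the partially-combined form the pass
now leaves behind is still equal to t1 + 2, because (t1 & -2) + 2 is always
even, so or-ing the saved low bit back in is the same as adding it. A
standalone C++ sketch (illustrative only, not part of the patch) that
exhaustively checks a 16-bit analogue of the residual sequence:

#include <cstdint>
#include <cstdio>

int main() {
  for (uint32_t v = 0; v <= 0xFFFF; ++v) {
    uint16_t t1 = static_cast<uint16_t>(v);
    uint16_t t3 = static_cast<uint16_t>((t1 & 0xFFFE) + 2); // (t1 & -2) + 2
    uint16_t t5 = t1 & 1;                                   // saved low bit
    uint16_t t6 = t5 | t3;                                  // recombine
    if (t6 != static_cast<uint16_t>(t1 + 2)) {
      std::printf("mismatch at t1 = %u\n", v);
      return 1;
    }
  }
  std::puts("t6 == t1 + 2 for all 16-bit values");
  return 0;
}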