Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp
+++ lib/Analysis/ValueTracking.cpp
@@ -462,6 +462,20 @@
   return m_CombineOr(m_And(L, R), m_And(R, L));
 }
 
+template <typename LHS, typename RHS>
+inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
+                        BinaryOp_match<RHS, LHS, Instruction::Or>>
+m_c_Or(const LHS &L, const RHS &R) {
+  return m_CombineOr(m_Or(L, R), m_Or(R, L));
+}
+
+template <typename LHS, typename RHS>
+inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
+                        BinaryOp_match<RHS, LHS, Instruction::Xor>>
+m_c_Xor(const LHS &L, const RHS &R) {
+  return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
+}
+
 static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
                                        APInt &KnownOne, const DataLayout *DL,
@@ -493,6 +507,7 @@
                                            m_BitCast(m_Specific(V))));
 
     CmpInst::Predicate Pred;
+    ConstantInt *C;
     // assume(v = a)
     if (match(I, m_Intrinsic<Intrinsic::assume>(
                    m_c_ICmp(Pred, m_V, m_Value(A)))) &&
@@ -514,6 +529,203 @@
       // known bits from the RHS to V.
       KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne |= RHSKnownOne & MaskKnownOne;
+    // assume(~(v & b) = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
+                                        m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
+      computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));
+
+      // For those bits in the mask that are known to be one, we can propagate
+      // inverted known bits from the RHS to V.
+      KnownZero |= RHSKnownOne & MaskKnownOne;
+      KnownOne |= RHSKnownZero & MaskKnownOne;
+    // assume(v | b = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
+      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
+
+      // For those bits in B that are known to be zero, we can propagate known
+      // bits from the RHS to V.
+      KnownZero |= RHSKnownZero & BKnownZero;
+      KnownOne |= RHSKnownOne & BKnownZero;
+    // assume(~(v | b) = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
+                                        m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
+      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
+
+      // For those bits in B that are known to be zero, we can propagate
+      // inverted known bits from the RHS to V.
+      KnownZero |= RHSKnownOne & BKnownZero;
+      KnownOne |= RHSKnownZero & BKnownZero;
+    // assume(v ^ b = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
+      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
+
+      // For those bits in B that are known to be zero, we can propagate known
+      // bits from the RHS to V. For those bits in B that are known to be one,
+      // we can propagate inverted known bits from the RHS to V.
+      KnownZero |= RHSKnownZero & BKnownZero;
+      KnownOne |= RHSKnownOne & BKnownZero;
+      KnownZero |= RHSKnownOne & BKnownOne;
+      KnownOne |= RHSKnownZero & BKnownOne;
+    // assume(~(v ^ b) = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
+                                        m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
+      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
+
+      // For those bits in B that are known to be zero, we can propagate
+      // inverted known bits from the RHS to V. For those bits in B that are
+      // known to be one, we can propagate known bits from the RHS to V.
+      KnownZero |= RHSKnownOne & BKnownZero;
+      KnownOne |= RHSKnownZero & BKnownZero;
+      KnownZero |= RHSKnownZero & BKnownOne;
+      KnownOne |= RHSKnownOne & BKnownOne;
+    // assume(v << c = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
+                                        m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      // For those bits in RHS that are known, we can propagate them to known
+      // bits in V shifted to the right by C.
+      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
+      KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
+    // assume(~(v << c) = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
+                                        m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      // For those bits in RHS that are known, we can propagate them inverted
+      // to known bits in V shifted to the right by C.
+      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
+      KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
+    // assume(v >> c = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
+                                                    m_AShr(m_V,
+                                                           m_ConstantInt(C))),
+                                        m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      // For those bits in RHS that are known, we can propagate them to known
+      // bits in V shifted to the left by C.
+      KnownZero |= RHSKnownZero << C->getZExtValue();
+      KnownOne |= RHSKnownOne << C->getZExtValue();
+    // assume(~(v >> c) = a)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_c_ICmp(Pred, m_Not(m_CombineOr(
+                                         m_LShr(m_V, m_ConstantInt(C)),
+                                         m_AShr(m_V, m_ConstantInt(C)))),
+                                        m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+      // For those bits in RHS that are known, we can propagate them inverted
+      // to known bits in V shifted to the left by C.
+      KnownZero |= RHSKnownOne << C->getZExtValue();
+      KnownOne |= RHSKnownZero << C->getZExtValue();
+    // assume(v >=_s c) where c is non-negative
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_ICmp(Pred, m_V, m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_SGE &&
+               isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+
+      if (RHSKnownZero.isNegative()) {
+        // We know that the sign bit is zero.
+        KnownZero |= APInt::getSignBit(BitWidth);
+      }
+    // assume(v >_s c) where c is at least -1.
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_ICmp(Pred, m_V, m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_SGT &&
+               isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+
+      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
+        // We know that the sign bit is zero.
+        KnownZero |= APInt::getSignBit(BitWidth);
+      }
+    // assume(v <=_s c) where c is negative
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_ICmp(Pred, m_V, m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_SLE &&
+               isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+
+      if (RHSKnownOne.isNegative()) {
+        // We know that the sign bit is one.
+        KnownOne |= APInt::getSignBit(BitWidth);
+      }
+    // assume(v <_s c) where c is non-positive
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_ICmp(Pred, m_V, m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_SLT &&
+               isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+
+      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
+        // We know that the sign bit is one.
+        KnownOne |= APInt::getSignBit(BitWidth);
+      }
+    // assume(v <=_u c)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_ICmp(Pred, m_V, m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_ULE &&
+               isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+
+      // Whatever high bits in c are zero are known to be zero.
+      KnownZero |=
+        APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
+    // assume(v <_u c)
+    } else if (match(I, m_Intrinsic<Intrinsic::assume>(
+                         m_ICmp(Pred, m_V, m_Value(A)))) &&
+               Pred == ICmpInst::ICMP_ULT &&
+               isValidAssumeForContext(I, Q, DL)) {
+      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
+
+      // Whatever high bits in c are zero are known to be zero (if c is a power
+      // of 2, then one more).
+      if (isKnownToBeAPowerOfTwo(A, false, Depth+1, Query(Q, I)))
+        KnownZero |=
+          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
+      else
+        KnownZero |=
+          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
     }
   }
 }
Index: lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -250,6 +250,12 @@
     assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
     assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
 
+    // If the client is only demanding bits that we know, return the known
+    // constant.
+    if ((DemandedMask & ((RHSKnownZero | LHSKnownZero) |
+                         (RHSKnownOne & LHSKnownOne))) == DemandedMask)
+      return Constant::getIntegerValue(VTy, RHSKnownOne & LHSKnownOne);
+
     // If all of the demanded bits are known 1 on one side, return the other.
     // These bits cannot contribute to the result of the 'and'.
     if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
@@ -282,6 +288,12 @@
     assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
     assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
 
+    // If the client is only demanding bits that we know, return the known
+    // constant.
+    if ((DemandedMask & ((RHSKnownZero & LHSKnownZero) |
+                         (RHSKnownOne | LHSKnownOne))) == DemandedMask)
+      return Constant::getIntegerValue(VTy, RHSKnownOne | LHSKnownOne);
+
     // If all of the demanded bits are known zero on one side, return the other.
     // These bits cannot contribute to the result of the 'or'.
     if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
@@ -318,6 +330,18 @@
     assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
     assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
 
+    // Output known-0 bits are known if clear or set in both the LHS & RHS.
+    APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
+                       (RHSKnownOne & LHSKnownOne);
+    // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
+    APInt IKnownOne = (RHSKnownZero & LHSKnownOne) |
+                      (RHSKnownOne & LHSKnownZero);
+
+    // If the client is only demanding bits that we know, return the known
+    // constant.
+    if ((DemandedMask & (IKnownZero | IKnownOne)) == DemandedMask)
+      return Constant::getIntegerValue(VTy, IKnownOne);
+
     // If all of the demanded bits are known zero on one side, return the other.
     // These bits cannot contribute to the result of the 'xor'.
     if ((DemandedMask & RHSKnownZero) == DemandedMask)
Index: test/Transforms/InstCombine/assume2.ll
===================================================================
--- /dev/null
+++ test/Transforms/InstCombine/assume2.ll
@@ -0,0 +1,174 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+declare void @llvm.assume(i1) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @test1(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test1
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 5
+
+  %and = and i32 %a, 15
+  %cmp = icmp eq i32 %and, 5
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 7
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test2(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test2
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 2
+
+  %and = and i32 %a, 15
+  %nand = xor i32 %and, -1
+  %cmp = icmp eq i32 %nand, 4294967285
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 7
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test3(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test3
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 5
+
+  %v = or i32 %a, 4294967280
+  %cmp = icmp eq i32 %v, 4294967285
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 7
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test4(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test4
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 2
+
+  %v = or i32 %a, 4294967280
+  %nv = xor i32 %v, -1
+  %cmp = icmp eq i32 %nv, 5
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 7
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test5(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test5
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 4
+
+  %v = xor i32 %a, 1
+  %cmp = icmp eq i32 %v, 5
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 7
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test6(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test6
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 5
+
+  %v = shl i32 %a, 2
+  %cmp = icmp eq i32 %v, 20
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 63
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test7(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test7
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 20
+
+  %v = lshr i32 %a, 2
+  %cmp = icmp eq i32 %v, 5
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 252
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test8(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test8
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 20
+
+  %v = ashr i32 %a, 2
+  %cmp = icmp eq i32 %v, 5
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 252
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test9(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test9
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 0
+
+  %cmp = icmp sgt i32 %a, 5
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 2147483648
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test10(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test10
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 -2147483648
+
+  %cmp = icmp sle i32 %a, -2
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 2147483648
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test11(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @test11
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 0
+
+  %cmp = icmp ule i32 %a, 256
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 3072
+  ret i32 %and1
+}
+
+attributes #0 = { nounwind uwtable }
+attributes #1 = { nounwind }