Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1682,6 +1682,16 @@
                          N0C->getAPIntValue(), DL, VT),
                          N0.getOperand(1));
     }
+    // fold (A|c1)+c2 = A+(c1+c2) where A and c1 have no common bits.
+    if (N0.getOpcode() == ISD::OR)
+      if (ConstantSDNode *N0C = getAsNonOpaqueConstant(N0.getOperand(1)))
+        if (DAG.haveNoCommonBitsSet(SDValue(N0C, 0), N0.getOperand(0))) {
+          SDLoc DL(N);
+          return DAG.getNode(ISD::ADD, DL, VT,
+                             DAG.getConstant(N1C->getAPIntValue()+
+                                             N0C->getAPIntValue(), DL, VT),
+                             N0.getOperand(0));
+        }
   }
   // reassociate add
   if (SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1))
@@ -10058,6 +10068,116 @@
   return false;
 }
 
+namespace {
+/// Helper struct to parse and store a memory address as base + index + offset.
+/// We ignore sign extensions when it is safe to do so.
+/// The following two expressions are not equivalent. To differentiate we need
+/// to store whether there was a sign extension involved in the index
+/// computation.
+/// (load (i64 add (i64 copyfromreg %c)
+///                (i64 signextend (add (i8 load %index)
+///                                     (i8 1))))
+/// vs
+///
+/// (load (i64 add (i64 copyfromreg %c)
+///                (i64 signextend (i32 add (i32 signextend (i8 load %index))
+///                                         (i32 1)))))
+struct BaseIndexOffset {
+  SDValue Base;
+  SDValue Index;
+  int64_t Offset;
+  bool IsIndexSignExt;
+
+  BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {}
+
+  BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
+                  bool IsIndexSignExt) :
+    Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {}
+
+  bool equalBaseIndex(const BaseIndexOffset &Other) {
+    return Other.Base == Base && Other.Index == Index &&
+           Other.IsIndexSignExt == IsIndexSignExt;
+  }
+
+  /// Parses tree in Ptr for base, index, offset addresses.
+  static BaseIndexOffset match(SDValue Ptr, SelectionDAG &DAG) {
+    bool IsIndexSignExt = false;
+
+    // Split up a folded GlobalAddress+Offset into its component parts.
+    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ptr))
+      if (GA->getOpcode() == ISD::GlobalAddress && GA->getOffset() != 0) {
+        return BaseIndexOffset(DAG.getGlobalAddress(GA->getGlobal(),
+                                                    SDLoc(GA),
+                                                    GA->getValueType(0),
+                                                    /*Offset=*/0,
+                                                    /*isTargetGA=*/false,
+                                                    GA->getTargetFlags()),
+                               SDValue(),
+                               GA->getOffset(),
+                               IsIndexSignExt);
+      }
+
+    // We can only pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD
+    // instruction, then it could be just the BASE or everything else we don't
+    // know how to handle. Just use Ptr as BASE and give up.
+    if (Ptr->getOpcode() != ISD::ADD &&
+        !(Ptr->getOpcode() == ISD::OR &&
+          DAG.haveNoCommonBitsSet(Ptr->getOperand(0),
+                                  Ptr->getOperand(1))))
+      return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
+
+    // We know that we have at least an ADD instruction. Try to pattern match
+    // the simple case of BASE + OFFSET.
+    if (isa<ConstantSDNode>(Ptr->getOperand(1))) {
+      int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue();
+      return BaseIndexOffset(Ptr->getOperand(0), SDValue(), Offset,
+                             IsIndexSignExt);
+    }
+
+    // Inside a loop the current BASE pointer is calculated using an ADD and a
+    // MUL instruction. In this case Ptr is the actual BASE pointer.
+    // (i64 add (i64 %array_ptr)
+    //          (i64 mul (i64 %induction_var)
+    //                   (i64 %element_size)))
+    if (Ptr->getOperand(1)->getOpcode() == ISD::MUL)
+      return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
+
+    // Look at Base + Index + Offset cases.
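+    // Illustrative shape for this case (%base and %i are placeholders):
+    // (add %base, (add (sign_extend %i), 16)) parses as Base = %base,
+    // Index = %i, Offset = 16, IsIndexSignExt = true.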
+    SDValue Base = Ptr->getOperand(0);
+    SDValue IndexOffset = Ptr->getOperand(1);
+
+    // Skip signextends.
+    if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) {
+      IndexOffset = IndexOffset->getOperand(0);
+      IsIndexSignExt = true;
+    }
+
+    // Either the case of Base + Index (no offset) or something else.
+    if (IndexOffset->getOpcode() != ISD::ADD &&
+        !(IndexOffset->getOpcode() == ISD::OR &&
+          DAG.haveNoCommonBitsSet(IndexOffset->getOperand(0),
+                                  IndexOffset->getOperand(1))))
+      return BaseIndexOffset(Base, IndexOffset, 0, IsIndexSignExt);
+
+    // Now we have the case of Base + Index + offset.
+    SDValue Index = IndexOffset->getOperand(0);
+    SDValue Offset = IndexOffset->getOperand(1);
+
+    if (!isa<ConstantSDNode>(Offset))
+      return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
+
+    // Ignore signextends.
+    if (Index->getOpcode() == ISD::SIGN_EXTEND) {
+      Index = Index->getOperand(0);
+      IsIndexSignExt = true;
+    } else IsIndexSignExt = false;
+
+    int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue();
+    return BaseIndexOffset(Base, Index, Off, IsIndexSignExt);
+  }
+};
+} // namespace
+
 /// \brief Return the base-pointer arithmetic from an indexed \p LD.
 SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
   ISD::MemIndexedMode AM = LD->getAddressingMode();
@@ -10150,16 +10270,355 @@
     }
   }
 
+  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
+                                                  : DAG.getSubtarget().useAA();
+#ifndef NDEBUG
+  if (CombinerAAOnlyFunc.getNumOccurrences() &&
+      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
+    UseAA = false;
+#endif
+
   // If this load is directly stored, replace the load value with the stored
   // value.
-  // TODO: Handle store large -> read small portion.
-  // TODO: Handle TRUNCSTORE/LOADEXT
-  if (ISD::isNormalLoad(N) && !LD->isVolatile()) {
-    if (ISD::isNON_TRUNCStore(Chain.getNode())) {
-      StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
-      if (PrevST->getBasePtr() == Ptr &&
-          PrevST->getValue().getValueType() == N->getValueType(0))
-        return CombineTo(N, Chain.getOperand(1), Chain);
+  if (OptLevel != CodeGenOpt::None && !LD->isVolatile() &&
+      LD->getMemoryVT().isByteSized()) {
+    SDLoc dl(N);
+
+    // First, collect all candidate stores. This is only the chain operand,
+    // except that we recurse through TokenFactor nodes (we're depending here
+    // on FindBetterChain to have done the hard work already).
+    SmallVector<StoreSDNode *, 8> Stores;
+    if (auto *ST = dyn_cast<StoreSDNode>(Chain)) {
+      Stores.push_back(ST);
+    } else if (Chain.getOpcode() == ISD::TokenFactor) {
+      for (auto &Op : Chain->ops())
+        if (auto *ST = dyn_cast<StoreSDNode>(Op))
+          Stores.push_back(ST);
+    }
+
+    // FIXME: When we start calling FindBetterChain regardless of UseAA, we can
+    // drop this...
+    if (!UseAA) {
+      SmallVector<SDValue, 8> Aliases;
+      GatherAllAliases(N, Chain, Aliases);
+      for (auto &Op : Aliases)
+        if (auto *ST = dyn_cast<StoreSDNode>(Op))
+          Stores.push_back(ST);
+    }
+
+    if (!Stores.empty()) {
+      auto GetMemInfo = [&](const LSBaseSDNode *LS, BaseIndexOffset &LSPtrInfo,
+                            unsigned &LSSize, SDValue &LSOffset,
+                            ISD::MemIndexedMode &LSAM) {
+        LSPtrInfo = BaseIndexOffset::match(LS->getBasePtr(), DAG);
+        LSSize = LS->getMemoryVT().getStoreSize();
+
+        // We don't need to consider post-inc/dec addressing modes (they don't
+        // affect the offset of the present access). For the pre-inc/dec modes,
+        // we either have a constant increment (in which case we can simply use
+        // it to adjust the existing constant offset and otherwise ignore the
+        // addressing mode), or we have a variable increment (in which case the
+        // addressing mode and the increment value must match between the load
+        // and the store).
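+        // For instance (numbers illustrative): a PRE_INC store with parsed
+        // constant offset 16 and constant increment 8 is treated below as an
+        // unindexed access at offset 24; the PRE_DEC form would instead give
+        // offset 8.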
+        LSAM = ISD::UNINDEXED;
+        if (LS->getAddressingMode() == ISD::PRE_INC ||
+            LS->getAddressingMode() == ISD::PRE_DEC) {
+          if (auto *C = dyn_cast<ConstantSDNode>(LS->getOffset())) {
+            LSPtrInfo.Offset +=
+              (LS->getAddressingMode() == ISD::PRE_DEC ? -1 : 1) *
+              C->getSExtValue();
+          } else {
+            LSOffset = LS->getOffset();
+            LSAM = LS->getAddressingMode();
+          }
+        }
+      };
+
+      BaseIndexOffset LDPtrInfo;
+      unsigned LDSize;
+      SDValue LDOffset;
+      ISD::MemIndexedMode LDAM;
+      GetMemInfo(LD, LDPtrInfo, LDSize, LDOffset, LDAM);
+
+      for (auto *ST : Stores) {
+        if (!ST->getMemoryVT().isByteSized())
+          continue;
+
+        BaseIndexOffset STPtrInfo;
+        unsigned STSize;
+        SDValue STOffset;
+        ISD::MemIndexedMode STAM;
+        GetMemInfo(ST, STPtrInfo, STSize, STOffset, STAM);
+
+        if (LDPtrInfo.equalBaseIndex(STPtrInfo) &&
+            LDAM == STAM && LDOffset == STOffset) {
+          if (LDPtrInfo.Offset >= STPtrInfo.Offset &&
+              LDPtrInfo.Offset + LDSize <= STPtrInfo.Offset + STSize) {
+            auto CheckOrGenerate = [&](bool Check, SDValue &Value) {
+              Value = ST->getValue();
+              EVT ValueVT = Value.getValueType();
+              if (ST->isTruncatingStore()) {
+                bool IsFPTrunc = ValueVT.isFloatingPoint();
+                // For floating-point types, we must actually perform the
+                // truncation (which is an fp_round operation to the smaller
+                // type). The same is true for vector types. For integers, we
+                // can perform the truncation, but we can also just ignore the
+                // upper part of the register.
+                bool ActuallyTrunc = true;
+                if (LegalTypes && !isTypeLegal(ST->getMemoryVT()))
+                  ActuallyTrunc = false;
+                else if (LegalOperations &&
+                         !TLI.isOperationLegalOrCustom(
+                            IsFPTrunc ? ISD::FP_ROUND : ISD::TRUNCATE,
+                            ST->getMemoryVT()))
+                  ActuallyTrunc = false;
+
+                if (Check) {
+                  if (!ActuallyTrunc && (IsFPTrunc || ValueVT.isVector()))
+                    return false;
+                } else if (ActuallyTrunc) {
+                  if (IsFPTrunc)
+                    Value = DAG.getNode(ISD::FP_ROUND, dl, ST->getMemoryVT(),
+                                        Value, DAG.getTargetConstant(0, dl,
+                                          TLI.getPointerTy(
+                                            DAG.getDataLayout())));
+                  else
+                    Value =
+                      DAG.getNode(ISD::TRUNCATE, dl, ST->getMemoryVT(), Value);
+
+                  AddToWorklist(Value.getNode());
+                }
+
+                if (ActuallyTrunc)
+                  ValueVT = ST->getMemoryVT();
+              }
+
+              if (LDSize < STSize) {
+                EVT STIntVT = EVT::getIntegerVT(*DAG.getContext(),
+                                                ValueVT.getSizeInBits());
+                if (Check) {
+                  if (LegalTypes && !isTypeLegal(STIntVT))
+                    return false;
+                  if (LegalOperations &&
+                      !TLI.isOperationLegalOrCustom(ISD::BITCAST, STIntVT))
+                    return false;
+                } else {
+                  Value = DAG.getNode(ISD::BITCAST, dl, STIntVT, Value);
+                  AddToWorklist(Value.getNode());
+                }
+
+                ValueVT = STIntVT;
+
+                if (LDPtrInfo.Offset > STPtrInfo.Offset) {
+                  // For little-Endian, the offset delta gives the number of
+                  // low-order bits skipped before the beginning of the loaded
+                  // value. For big-Endian, the offset delta gives the number of
+                  // high-order bits skipped after the end of the loaded value.
+                  // The number of low-order bits skipped is given by the delta
+                  // in the end-byte addresses.
+                  int64_t DiffBits = LDPtrInfo.Offset - STPtrInfo.Offset;
+                  if (!DAG.getDataLayout().isLittleEndian())
+                    DiffBits = (STSize - LDSize) - DiffBits;
+                  DiffBits *= 8;
+
+                  if (Check) {
+                    if (LegalOperations &&
+                        !TLI.isOperationLegalOrCustom(ISD::SRL, STIntVT))
+                      return false;
+                  } else {
+                    Value = DAG.getNode(ISD::SRL, dl, STIntVT, Value,
+                                        DAG.getConstant(DiffBits, dl,
+                                          getShiftAmountTy(STIntVT)));
+                    AddToWorklist(Value.getNode());
+                  }
+                }
+
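+                // Worked example (sizes illustrative): for an i32 store and
+                // an i8 load at store-offset 2, little-Endian skips
+                // 2*8 = 16 low-order bits, while big-Endian skips
+                // ((4-1)-2)*8 = 8, the delta in end-byte addresses.
+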
+                // If the loaded type is smaller than the stored type (or the
+                // original type, if we had a truncating store and we didn't
+                // actually truncate), then we have two options:
+                // 1. Truncate the value (and then actually extend later if
+                //    necessary).
+                // 2. Don't truncate, and extend in-register later if
+                //    necessary.
+
+                bool ActuallyTrunc = true;
+                EVT LDIntVT = EVT::getIntegerVT(*DAG.getContext(), 8*LDSize);
+                if (LegalTypes && !isTypeLegal(LDIntVT))
+                  ActuallyTrunc = false;
+                else if (LegalOperations && LDIntVT != ValueVT &&
+                         !TLI.isOperationLegalOrCustom(ISD::TRUNCATE, LDIntVT))
+                  ActuallyTrunc = false;
+
+                if (ActuallyTrunc) {
+                  if (!Check) {
+                    Value = DAG.getNode(ISD::TRUNCATE, dl, LDIntVT, Value);
+                    AddToWorklist(Value.getNode());
+                  }
+
+                  ValueVT = LDIntVT;
+                }
+              }
+
+              // We now either have a type which is the size of the load's
+              // memory type, or the store's value type; we need to bitcast or
+              // convert to either the load's memory type or the load's value
+              // type. If the value currently has the store's value type, and
+              // needs to have the load's value type, we'll need to
+              // extend/truncate as necessary to make that happen.
+              if (ValueVT.getSizeInBits() !=
+                    LD->getMemoryVT().getSizeInBits() &&
+                  ValueVT.getSizeInBits() !=
+                    LD->getValueType(0).getSizeInBits()) {
+                // We have the size of the store's value type, and need to have
+                // the size of the load's value type. This can only happen for
+                // non-vector integer types.
+                EVT LDIntVT =
+                  EVT::getIntegerVT(*DAG.getContext(),
+                                    LD->getValueType(0).getSizeInBits());
+                if (Check && LegalTypes && !isTypeLegal(LDIntVT))
+                  return false;
+
+                unsigned ConvOpcode = ISD::TRUNCATE;
+                if (ValueVT.getSizeInBits() <
+                      LD->getValueType(0).getSizeInBits()) {
+                  ConvOpcode = ISD::ANY_EXTEND;
+                  if (Check && LegalOperations &&
+                      !TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND,
+                                                    LDIntVT) &&
+                      !TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND,
+                                                    LDIntVT) &&
+                      !TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND,
+                                                    LDIntVT))
+                    // We don't know how anyext will be legalized, but if none
+                    // of these are legal, then assume we can't do this.
+                    return false;
+                }
+
+                if (!Check) {
+                  Value = DAG.getNode(ConvOpcode, dl, LDIntVT, Value);
+                  AddToWorklist(Value.getNode());
+                }
+
+                ValueVT = LDIntVT;
+              }
+
+              // Now convert to either the load's memory type or the load's
+              // value type.
+              EVT TargetVT =
+                ValueVT.getSizeInBits() == LD->getMemoryVT().getSizeInBits() ?
+                  LD->getMemoryVT() : LD->getValueType(0);
+
+              if (Check) {
+                if (LegalTypes && !isTypeLegal(TargetVT))
+                  return false;
+                if (LegalOperations &&
+                    TargetVT != ValueVT &&
+                    !TLI.isOperationLegalOrCustom(ISD::BITCAST, TargetVT))
+                  return false;
+              } else {
+                Value = DAG.getNode(ISD::BITCAST, dl, TargetVT, Value);
+                AddToWorklist(Value.getNode());
+              }
+
+              ValueVT = TargetVT;
+
+              ISD::LoadExtType ExtType = LD->getExtensionType();
+              unsigned ExtOpcode = 0;
+              switch (ExtType) {
+              default: break;
+              case ISD::SEXTLOAD: ExtOpcode = ISD::SIGN_EXTEND; break;
+              case ISD::ZEXTLOAD: ExtOpcode = ISD::ZERO_EXTEND; break;
+              case ISD::EXTLOAD:
+                ExtOpcode = Value.getValueType().isFloatingPoint() ?
+                              ISD::FP_EXTEND : ISD::ANY_EXTEND;
+                break;
+              }
+
+              bool ExtInReg =
+                ValueVT.getSizeInBits() == LD->getValueType(0).getSizeInBits();
+              // Non-vector anyext "in register" is a nop.
+              if (ExtOpcode == ISD::ANY_EXTEND && !ValueVT.isVector() &&
+                  ExtInReg)
+                ExtOpcode = 0;
+
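+              // Example under these rules (types illustrative): an i8
+              // sextload forwarded from an i32 store normally truncates to
+              // i8 and then SIGN_EXTENDs back to i32; if the i8 truncate is
+              // skipped (e.g. the type is not legal), the value keeps the
+              // i32 width, ExtInReg is true, and SIGN_EXTEND_INREG from i8
+              // is used instead.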
+              if (ExtOpcode) {
+                if (ExtInReg) {
+                  switch (ExtOpcode) {
+                  default: llvm_unreachable("Unknown extension type?");
+                  case ISD::ANY_EXTEND:
+                    ExtOpcode = ISD::ANY_EXTEND_VECTOR_INREG;
+                    break;
+                  case ISD::ZERO_EXTEND:
+                    ExtOpcode = ValueVT.isVector() ?
+                                  ISD::ZERO_EXTEND_VECTOR_INREG : ISD::AND;
+                    break;
+                  case ISD::SIGN_EXTEND:
+                    ExtOpcode = ValueVT.isVector() ?
+                                  ISD::SIGN_EXTEND_VECTOR_INREG :
+                                  ISD::SIGN_EXTEND_INREG;
+                    break;
+                  }
+                }
+
+                if (ExtOpcode == ISD::ANY_EXTEND && LegalOperations &&
+                    !TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND,
+                                                  LD->getValueType(0))) {
+                  if (TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND,
+                                                   LD->getValueType(0)))
+                    ExtOpcode = ISD::ZERO_EXTEND;
+                  else if (TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND,
+                                                        LD->getValueType(0)))
+                    ExtOpcode = ISD::SIGN_EXTEND;
+                }
+
+                if (Check) {
+                  // Given that DAGCombine runs LegalizeOp on these nodes, we
+                  // can use the default expansions for the "in register"
+                  // extensions, which should be reasonable.
+                  if (LegalOperations && !ExtInReg &&
+                      !TLI.isOperationLegalOrCustom(ExtOpcode,
+                                                    LD->getValueType(0)))
+                    return false;
+                } else {
+                  if (!ExtInReg) {
+                    Value = DAG.getNode(ExtOpcode, dl, LD->getValueType(0),
+                                        Value);
+                  } else if (ExtOpcode == ISD::AND) {
+                    // This is an "in register" zero extend.
+                    APInt Mask =
+                      APInt::getLowBitsSet(LD->getValueType(0).getSizeInBits(),
+                                           8*LDSize);
+                    Value = DAG.getNode(ISD::AND, dl, LD->getValueType(0),
+                                        Value, DAG.getConstant(Mask, dl,
+                                          LD->getValueType(0)));
+                  } else {
+                    Value =
+                      DAG.getNode(ExtOpcode, dl, LD->getValueType(0),
+                                  Value, DAG.getValueType(LD->getMemoryVT()));
+                  }
+
+                  AddToWorklist(Value.getNode());
+                }
+              }
+
+              return true;
+            };
+
+            SDValue Value;
+            if (CheckOrGenerate(true, Value)) {
+              CheckOrGenerate(false, Value);
+              DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), Value);
+
+              // We've now made the load dead (unless it is an indexed load, in
+              // which case we've made it replaceable with some pointer
+              // arithmetic). Revisit it for further simplification.
+              AddUsersToWorklist(N);
+              AddToWorklist(N);
+            }
+
+            break;
+          }
+        }
+      }
     }
   }
 
@@ -10177,13 +10636,6 @@
     }
   }
 
-  bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
-                                                  : DAG.getSubtarget().useAA();
-#ifndef NDEBUG
-  if (CombinerAAOnlyFunc.getNumOccurrences() &&
-      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
-    UseAA = false;
-#endif
   if (UseAA && LD->isUnindexed()) {
     // Walk up chain skipping non-aliasing memory nodes.
     SDValue BetterChain = FindBetterChain(N, Chain);
@@ -11084,110 +11536,6 @@
   return SDValue();
 }
 
-namespace {
-/// Helper struct to parse and store a memory address as base + index + offset.
-/// We ignore sign extensions when it is safe to do so.
-/// The following two expressions are not equivalent. To differentiate we need
-/// to store whether there was a sign extension involved in the index
-/// computation.
-/// (load (i64 add (i64 copyfromreg %c)
-///                (i64 signextend (add (i8 load %index)
-///                                     (i8 1))))
-/// vs
-///
-/// (load (i64 add (i64 copyfromreg %c)
-///                (i64 signextend (i32 add (i32 signextend (i8 load %index))
-///                                         (i32 1)))))
-struct BaseIndexOffset {
-  SDValue Base;
-  SDValue Index;
-  int64_t Offset;
-  bool IsIndexSignExt;
-
-  BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {}
-
-  BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
-                  bool IsIndexSignExt) :
-    Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {}
-
-  bool equalBaseIndex(const BaseIndexOffset &Other) {
-    return Other.Base == Base && Other.Index == Index &&
-           Other.IsIndexSignExt == IsIndexSignExt;
-  }
-
-  /// Parses tree in Ptr for base, index, offset addresses.
-  static BaseIndexOffset match(SDValue Ptr, SelectionDAG &DAG) {
-    bool IsIndexSignExt = false;
-
-    // Split up a folded GlobalAddress+Offset into its component parts.
-    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ptr))
-      if (GA->getOpcode() == ISD::GlobalAddress && GA->getOffset() != 0) {
-        return BaseIndexOffset(DAG.getGlobalAddress(GA->getGlobal(),
-                                                    SDLoc(GA),
-                                                    GA->getValueType(0),
-                                                    /*Offset=*/0,
-                                                    /*isTargetGA=*/false,
-                                                    GA->getTargetFlags()),
-                               SDValue(),
-                               GA->getOffset(),
-                               IsIndexSignExt);
-      }
-
-    // We can only pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD
-    // instruction, then it could be just the BASE or everything else we don't
-    // know how to handle. Just use Ptr as BASE and give up.
-    if (Ptr->getOpcode() != ISD::ADD)
-      return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
-
-    // We know that we have at least an ADD instruction. Try to pattern match
-    // the simple case of BASE + OFFSET.
-    if (isa<ConstantSDNode>(Ptr->getOperand(1))) {
-      int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue();
-      return BaseIndexOffset(Ptr->getOperand(0), SDValue(), Offset,
-                             IsIndexSignExt);
-    }
-
-    // Inside a loop the current BASE pointer is calculated using an ADD and a
-    // MUL instruction. In this case Ptr is the actual BASE pointer.
-    // (i64 add (i64 %array_ptr)
-    //          (i64 mul (i64 %induction_var)
-    //                   (i64 %element_size)))
-    if (Ptr->getOperand(1)->getOpcode() == ISD::MUL)
-      return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
-
-    // Look at Base + Index + Offset cases.
-    SDValue Base = Ptr->getOperand(0);
-    SDValue IndexOffset = Ptr->getOperand(1);
-
-    // Skip signextends.
-    if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) {
-      IndexOffset = IndexOffset->getOperand(0);
-      IsIndexSignExt = true;
-    }
-
-    // Either the case of Base + Index (no offset) or something else.
-    if (IndexOffset->getOpcode() != ISD::ADD)
-      return BaseIndexOffset(Base, IndexOffset, 0, IsIndexSignExt);
-
-    // Now we have the case of Base + Index + offset.
-    SDValue Index = IndexOffset->getOperand(0);
-    SDValue Offset = IndexOffset->getOperand(1);
-
-    if (!isa<ConstantSDNode>(Offset))
-      return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt);
-
-    // Ignore signextends.
-    if (Index->getOpcode() == ISD::SIGN_EXTEND) {
-      Index = Index->getOperand(0);
-      IsIndexSignExt = true;
-    } else IsIndexSignExt = false;
-
-    int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue();
-    return BaseIndexOffset(Base, Index, Off, IsIndexSignExt);
-  }
-};
-} // namespace
-
 // This is a helper function for visitMUL to check the profitability
 // of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
 // MulNode is the original multiply, AddNode is (add x, c1),
@@ -14861,12 +15209,16 @@
 /// Return true if base is a frame index, which is known not to alias with
 /// anything but itself. Provides base object and offset as results.
 static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
-                           const GlobalValue *&GV, const void *&CV) {
+                           const GlobalValue *&GV, const void *&CV,
+                           SelectionDAG &DAG) {
   // Assume it is a primitive operation.
   Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;
 
   // If it's adding a simple constant then integrate the offset.
-  if (Base.getOpcode() == ISD::ADD) {
+  if (Base.getOpcode() == ISD::ADD ||
+      (Base.getOpcode() == ISD::OR &&
+       DAG.haveNoCommonBitsSet(Base.getOperand(0),
+                               Base.getOperand(1)))) {
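+    // An OR whose operands share no set bits behaves like an ADD here; e.g.
+    // if the base is known 16-byte aligned (low four bits zero), then
+    // (or %base, 4) == (add %base, 4), so the constant folds into Offset
+    // just as it would for an ADD. (%base is a placeholder value.)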
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
       Base = Base.getOperand(0);
       Offset += C->getZExtValue();
@@ -14918,9 +15270,9 @@
   const GlobalValue *GV1, *GV2;
   const void *CV1, *CV2;
   bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
-                                      Base1, Offset1, GV1, CV1);
+                                      Base1, Offset1, GV1, CV1, DAG);
   bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
-                                      Base2, Offset2, GV2, CV2);
+                                      Base2, Offset2, GV2, CV2, DAG);
 
   // If they have the same base address then check to see if they overlap.
   if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
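
For reference, a minimal end-to-end sketch of the forwarding this enables
(C level, little-Endian, names illustrative):

  int buf;
  buf = v;                                        // i32 store
  unsigned char c = ((unsigned char *)&buf)[2];   // i8 load at offset +2

After the combine, the load of 'c' is rewritten in terms of the stored value,
roughly c = (unsigned char)(v >> 16), subject to the type and operation
legality checks above.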