Index: include/llvm/CodeGen/SelectionDAG.h =================================================================== --- include/llvm/CodeGen/SelectionDAG.h +++ include/llvm/CodeGen/SelectionDAG.h @@ -281,6 +281,7 @@ void clear(); MachineFunction &getMachineFunction() const { return *MF; } + const DataLayout &getDataLayout() const { return MF->getDataLayout(); } const TargetMachine &getTarget() const { return TM; } const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); } const TargetLowering &getTargetLoweringInfo() const { return *TLI; } Index: include/llvm/Target/TargetLowering.h =================================================================== --- include/llvm/Target/TargetLowering.h +++ include/llvm/Target/TargetLowering.h @@ -2414,6 +2414,8 @@ ArgListTy &getArgs() { return Args; } + + SelectionDAG &getDAG() { return DAG; } }; /// This function lowers an abstract call to a function into an actual call. @@ -2657,7 +2659,8 @@ /// specific constraints and their prefixes, and also tie in the associated /// operand values. If this returns an empty vector, and if the constraint /// string itself isn't empty, there was an error parsing. - virtual AsmOperandInfoVector ParseConstraints(const TargetRegisterInfo *TRI, + virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, + const TargetRegisterInfo *TRI, ImmutableCallSite CS) const; /// Examine constraint type and operand type and determine a weight value. Index: lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- lib/CodeGen/CodeGenPrepare.cpp +++ lib/CodeGen/CodeGenPrepare.cpp @@ -2988,7 +2988,8 @@ const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering(); const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo(); TargetLowering::AsmOperandInfoVector TargetConstraints = - TLI->ParseConstraints(TRI, ImmutableCallSite(CI)); + TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI, + ImmutableCallSite(CI)); for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; @@ -3549,8 +3550,8 @@ const TargetRegisterInfo *TRI = TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo(); - TargetLowering::AsmOperandInfoVector - TargetConstraints = TLI->ParseConstraints(TRI, CS); + TargetLowering::AsmOperandInfoVector TargetConstraints = + TLI->ParseConstraints(*DL, TRI, CS); unsigned ArgNo = 0; for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -3111,7 +3111,7 @@ // For big endian targets, we need to add an offset to the pointer // to load the correct bytes. For little endian systems, we merely // need to read fewer bytes from the same pointer. - if (TLI.isBigEndian()) { + if (DAG.getDataLayout().isBigEndian()) { unsigned LVTStoreBytes = LoadedVT.getStoreSize(); unsigned EVTStoreBytes = ExtVT.getStoreSize(); unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; @@ -6675,7 +6675,7 @@ // For big endian targets, we need to adjust the offset to the pointer to // load the correct bytes. 
- if (TLI.isBigEndian()) { + if (DAG.getDataLayout().isBigEndian()) { unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits(); unsigned EVTStoreBits = ExtVT.getStoreSizeInBits(); ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; @@ -6873,7 +6873,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); - bool isLE = TLI.isLittleEndian(); + bool isLE = DAG.getDataLayout().isLittleEndian(); // noop truncate if (N0.getValueType() == N->getValueType(0)) @@ -7093,8 +7093,8 @@ !LD2->isVolatile() && DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) { unsigned Align = LD1->getAlignment(); - unsigned NewAlign = TLI.getDataLayout()-> - getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); + unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( + VT.getTypeForEVT(*DAG.getContext())); if (NewAlign <= Align && (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) @@ -7155,8 +7155,8 @@ (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) && TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - unsigned Align = TLI.getDataLayout()-> - getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); + unsigned Align = DAG.getDataLayout().getABITypeAlignment( + VT.getTypeForEVT(*DAG.getContext())); unsigned OrigAlign = LN0->getAlignment(); if (Align <= OrigAlign) { @@ -7368,7 +7368,7 @@ SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = BV->getNumOperands(); i != e; i += NumInputsPerOutput) { - bool isLE = TLI.isLittleEndian(); + bool isLE = DAG.getDataLayout().isLittleEndian(); APInt NewBits = APInt(DstBitSize, 0); bool EltIsUndef = true; for (unsigned j = 0; j != NumInputsPerOutput; ++j) { @@ -7415,7 +7415,7 @@ } // For big endian targets, swap the order of the pieces of each element. - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::reverse(Ops.end()-NumOutputsPerInput, Ops.end()); } @@ -9869,8 +9869,7 @@ /// \pre DAG != nullptr. uint64_t getOffsetFromBase() const { assert(DAG && "Missing context."); - bool IsBigEndian = - DAG->getTargetLoweringInfo().getDataLayout()->isBigEndian(); + bool IsBigEndian = DAG->getDataLayout().isBigEndian(); assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported."); uint64_t Offset = Shift / 8; unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8; @@ -9953,7 +9952,7 @@ // Check if it will be merged with the load. // 1. Check the alignment constraint. - unsigned RequiredAlignment = TLI.getDataLayout()->getABITypeAlignment( + unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment( ResVT.getTypeForEVT(*DAG->getContext())); if (RequiredAlignment > getAlignment()) @@ -10321,7 +10320,7 @@ unsigned StOffset; unsigned NewAlign = St->getAlignment(); - if (DAG.getTargetLoweringInfo().isLittleEndian()) + if (DAG.getDataLayout().isLittleEndian()) StOffset = ByteShift; else StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes; @@ -10434,12 +10433,12 @@ uint64_t PtrOff = ShAmt / 8; // For big endian targets, we need to adjust the offset to the pointer to // load the correct bytes.
- if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff; unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff); Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext()); - if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy)) + if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy)) return SDValue(); SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD), @@ -10503,7 +10502,7 @@ unsigned LDAlign = LD->getAlignment(); unsigned STAlign = ST->getAlignment(); Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext()); - unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy); + unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy); if (LDAlign < ABIAlign || STAlign < ABIAlign) return SDValue(); @@ -10685,7 +10684,7 @@ // Construct a single integer constant which is made of the smaller // constant inputs. - bool IsLE = TLI.isLittleEndian(); + bool IsLE = DAG.getDataLayout().isLittleEndian(); for (unsigned i = 0; i < NumElem ; ++i) { unsigned Idx = IsLE ? (NumElem - 1 - i) : i; StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode); @@ -10743,7 +10742,7 @@ return true; Type *Ty = EVTTy.getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment = TLI.getDataLayout()->getPrefTypeAlignment(Ty); + unsigned ABIAlignment = DAG.getDataLayout().getPrefTypeAlignment(Ty); return (Align >= ABIAlignment); } @@ -11205,8 +11204,8 @@ ST->isUnindexed()) { unsigned OrigAlign = ST->getAlignment(); EVT SVT = Value.getOperand(0).getValueType(); - unsigned Align = TLI.getDataLayout()-> - getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext())); + unsigned Align = DAG.getDataLayout().getABITypeAlignment( + SVT.getTypeForEVT(*DAG.getContext())); if (Align <= OrigAlign && ((!LegalOperations && !ST->isVolatile()) || TLI.isOperationLegalOrCustom(ISD::STORE, SVT))) @@ -11265,7 +11264,8 @@ uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue(); SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32); SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32); - if (TLI.isBigEndian()) std::swap(Lo, Hi); + if (DAG.getDataLayout().isBigEndian()) + std::swap(Lo, Hi); unsigned Alignment = ST->getAlignment(); bool isVolatile = ST->isVolatile(); @@ -11514,7 +11514,7 @@ EVT ResultVT = EVE->getValueType(0); EVT VecEltVT = InVecVT.getVectorElementType(); unsigned Align = OriginalLoad->getAlignment(); - unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment( + unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( VecEltVT.getTypeForEVT(*DAG.getContext())); if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT)) @@ -11825,7 +11825,7 @@ if (!ValidTypes) return SDValue(); - bool isLE = TLI.isLittleEndian(); + bool isLE = DAG.getDataLayout().isLittleEndian(); unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits(); assert(ElemRatio > 1 && "Invalid element size ratio"); SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType): @@ -13354,7 +13354,7 @@ const_cast<ConstantFP*>(TV->getConstantFPValue()) }; Type *FPTy = Elts[0]->getType(); - const DataLayout &TD = *TLI.getDataLayout(); + const DataLayout &TD = DAG.getDataLayout(); // Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts); Index: lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp =================================================================== --- lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -106,9 +106,9 @@ if (AI->isStaticAlloca()) { const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize()); Type *Ty = AI->getAllocatedType(); - uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty); + uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty); unsigned Align = - std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), + std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty), AI->getAlignment()); TySize *= CUI->getZExtValue(); // Get total allocated size. @@ -118,10 +118,10 @@ MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI); } else { - unsigned Align = std::max( - (unsigned)TLI->getDataLayout()->getPrefTypeAlignment( - AI->getAllocatedType()), - AI->getAlignment()); + unsigned Align = + std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment( + AI->getAllocatedType()), + AI->getAlignment()); unsigned StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlignment(); if (Align <= StackAlign) @@ -138,7 +138,7 @@ unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); std::vector<TargetLowering::AsmOperandInfo> Ops = - TLI->ParseConstraints(TRI, CS); + TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS); for (size_t I = 0, E = Ops.size(); I != E; ++I) { TargetLowering::AsmOperandInfo &Op = Ops[I]; if (Op.Type == InlineAsm::isClobber) { Index: lib/CodeGen/SelectionDAG/InstrEmitter.cpp =================================================================== --- lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -406,10 +406,10 @@ Type *Type = CP->getType(); // MachineConstantPool wants an explicit alignment. if (Align == 0) { - Align = MF->getTarget().getDataLayout()->getPrefTypeAlignment(Type); + Align = MF->getDataLayout().getPrefTypeAlignment(Type); if (Align == 0) { // Alignment of vector types. FIXME! - Align = MF->getTarget().getDataLayout()->getTypeAllocSize(Type); + Align = MF->getDataLayout().getTypeAllocSize(Type); } } Index: lib/CodeGen/SelectionDAG/LegalizeDAG.cpp =================================================================== --- lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -392,17 +392,18 @@ // Store the two parts SDValue Store1, Store2; - Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr, - ST->getPointerInfo(), NewStoredVT, + Store1 = DAG.getTruncStore(Chain, dl, + DAG.getDataLayout().isLittleEndian() ? Lo : Hi, + Ptr, ST->getPointerInfo(), NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), Alignment); Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, DAG.getConstant(IncrementSize, dl, TLI.getPointerTy(AS))); Alignment = MinAlign(Alignment, IncrementSize); - Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr, - ST->getPointerInfo().getWithOffset(IncrementSize), - NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), - Alignment, ST->getAAInfo()); + Store2 = DAG.getTruncStore( + Chain, dl, DAG.getDataLayout().isLittleEndian() ?
Hi : Lo, Ptr, + ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, + ST->isVolatile(), ST->isNonTemporal(), Alignment, ST->getAAInfo()); SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); @@ -522,7 +523,7 @@ // Load the value in two parts SDValue Lo, Hi; - if (TLI.isLittleEndian()) { + if (DAG.getDataLayout().isLittleEndian()) { Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), NewLoadedVT, LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(), Alignment, @@ -677,7 +678,7 @@ const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt(); SDValue Lo = DAG.getConstant(IntVal.trunc(32), dl, MVT::i32); SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), dl, MVT::i32); - if (TLI.isBigEndian()) std::swap(Lo, Hi); + if (DAG.getDataLayout().isBigEndian()) + std::swap(Lo, Hi); Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile, isNonTemporal, Alignment, AAInfo); @@ -724,7 +726,7 @@ unsigned Align = ST->getAlignment(); if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) { Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); + unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); if (Align < ABIAlignment) ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this); } @@ -756,6 +758,7 @@ EVT StVT = ST->getMemoryVT(); unsigned StWidth = StVT.getSizeInBits(); + auto &DL = DAG.getDataLayout(); if (StWidth != StVT.getStoreSizeInBits()) { // Promote to a byte-sized store with upper bits zero if not @@ -782,7 +785,7 @@ SDValue Lo, Hi; unsigned IncrementSize; - if (TLI.isLittleEndian()) { + if (DL.isLittleEndian()) { // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) // Store the bottom RoundWidth bits. Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), @@ -838,7 +841,7 @@ // expand it. if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) { Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); + unsigned ABIAlignment = DL.getABITypeAlignment(Ty); if (Align < ABIAlignment) ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this); } @@ -890,8 +893,7 @@ // expand it. if (!TLI.allowsMisalignedMemoryAccesses(LD->getMemoryVT(), AS, Align)) { Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment = - TLI.getDataLayout()->getABITypeAlignment(Ty); + unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); if (Align < ABIAlignment){ ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain); } @@ -995,8 +997,9 @@ EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); SDValue Lo, Hi, Ch; unsigned IncrementSize; + auto &DL = DAG.getDataLayout(); - if (TLI.isLittleEndian()) { + if (DL.isLittleEndian()) { // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) // Load the bottom RoundWidth bits. Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), @@ -1086,7 +1089,7 @@ unsigned Align = LD->getAlignment(); if (!TLI.allowsMisalignedMemoryAccesses(MemVT, AS, Align)) { Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment = TLI.getDataLayout()->getABITypeAlignment(Ty); + unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); if (Align < ABIAlignment){ ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, Value, Chain); } @@ -1569,6 +1572,7 @@ // Convert to an integer with the same sign bit.
SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); } else { + auto &DL = DAG.getDataLayout(); // Store the float to memory, then load the sign part out as an integer. MVT LoadTy = TLI.getPointerTy(); // First create a temporary that is aligned for both the load and store. @@ -1577,7 +1581,7 @@ SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), false, false, 0); - if (TLI.isBigEndian()) { + if (DL.isBigEndian()) { assert(FloatVT.isByteSized() && "Unsupported floating point type!"); // Load out a legal integer with the same sign bit as the float. SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), @@ -1777,9 +1781,8 @@ EVT DestVT, SDLoc dl) { // Create the stack frame object. - unsigned SrcAlign = - TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType(). - getTypeForEVT(*DAG.getContext())); + unsigned SrcAlign = DAG.getDataLayout().getPrefTypeAlignment( + SrcOp.getValueType().getTypeForEVT(*DAG.getContext())); SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); FrameIndexSDNode *StackPtrFI = cast(FIPtr); @@ -1790,7 +1793,7 @@ unsigned SlotSize = SlotVT.getSizeInBits(); unsigned DestSize = DestVT.getSizeInBits(); Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); - unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType); + unsigned DestAlign = DAG.getDataLayout().getPrefTypeAlignment(DestType); // Emit a store to the stack slot. Use a truncstore if the input value is // later than DestVT. @@ -2426,7 +2429,7 @@ SDValue Hi = StackSlot; SDValue Lo = DAG.getNode(ISD::ADD, dl, StackSlot.getValueType(), StackSlot, WordOff); - if (TLI.isLittleEndian()) + if (DAG.getDataLayout().isLittleEndian()) std::swap(Hi, Lo); // if signed map to unsigned space @@ -2584,7 +2587,8 @@ case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) } - if (TLI.isLittleEndian()) FF <<= 32; + if (DAG.getDataLayout().isLittleEndian()) + FF <<= 32; Constant *FudgeFactor = ConstantInt::get( Type::getInt64Ty(*DAG.getContext()), FF); @@ -3111,10 +3115,9 @@ // Increment the pointer, VAList, to the next vaarg Tmp3 = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList, - DAG.getConstant(TLI.getDataLayout()-> - getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), - dl, - VAList.getValueType())); + DAG.getConstant(DAG.getDataLayout().getTypeAllocSize( + VT.getTypeForEVT(*DAG.getContext())), + dl, VAList.getValueType())); // Store the incremented VAList to the legalized pointer Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, MachinePointerInfo(V), false, false, 0); @@ -3830,7 +3833,7 @@ EVT PTy = TLI.getPointerTy(); - const DataLayout &TD = *TLI.getDataLayout(); + const DataLayout &TD = DAG.getDataLayout(); unsigned EntrySize = DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); Index: lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp =================================================================== --- lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -282,7 +282,7 @@ Lo = BitConvertToInteger(Lo); Hi = BitConvertToInteger(Hi); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); InOp = DAG.getNode(ISD::ANY_EXTEND, dl, @@ -799,7 +799,7 @@ } // Handle endianness of the load. - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::reverse(Parts.begin(), Parts.end()); // Assemble the parts in the promoted type. 
@@ -1984,7 +1984,7 @@ // The high part is undefined. Hi = DAG.getUNDEF(NVT); } - } else if (TLI.isLittleEndian()) { + } else if (DAG.getDataLayout().isLittleEndian()) { // Little-endian - low bits are at low addresses. Lo = DAG.getLoad(NVT, dl, Ch, Ptr, N->getPointerInfo(), isVolatile, isNonTemporal, isInvariant, Alignment, @@ -2845,7 +2845,7 @@ Alignment, AAInfo); } - if (TLI.isLittleEndian()) { + if (DAG.getDataLayout().isLittleEndian()) { // Little-endian - low bits are at low addresses. GetExpandedInteger(N->getValue(), Lo, Hi); @@ -2963,7 +2963,8 @@ // Get a pointer to FF if the sign bit was set, or to 0 otherwise. SDValue Zero = DAG.getIntPtrConstant(0, dl); SDValue Four = DAG.getIntPtrConstant(4, dl); - if (TLI.isBigEndian()) std::swap(Zero, Four); + if (DAG.getDataLayout().isBigEndian()) + std::swap(Zero, Four); SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four); unsigned Alignment = cast(FudgePtr)->getAlignment(); Index: lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp =================================================================== --- lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp +++ lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp @@ -131,7 +131,7 @@ SDValue LHS = Vals[Slot]; SDValue RHS = Vals[Slot + 1]; - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(LHS, RHS); Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, @@ -143,7 +143,7 @@ Lo = Vals[Slot++]; Hi = Vals[Slot++]; - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); return; @@ -155,9 +155,8 @@ // Create the stack frame object. Make sure it is aligned for both // the source and expanded destination types. - unsigned Alignment = - TLI.getDataLayout()->getPrefTypeAlignment(NOutVT. - getTypeForEVT(*DAG.getContext())); + unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment( + NOutVT.getTypeForEVT(*DAG.getContext())); SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment); int SPFI = cast(StackPtr.getNode())->getIndex(); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); @@ -241,7 +240,7 @@ DAG.getConstant(1, dl, Idx.getValueType())); Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); } @@ -325,7 +324,7 @@ if (NumElements > 1) { NumElements >>= 1; SplitInteger(Op, Parts[0], Parts[1]); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Parts[0], Parts[1]); IntegerToVector(Parts[0], NumElements, Ops, EltVT); IntegerToVector(Parts[1], NumElements, Ops, EltVT); @@ -389,7 +388,7 @@ for (unsigned i = 0; i < NumElts; ++i) { SDValue Lo, Hi; GetExpandedOp(N->getOperand(i), Lo, Hi); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); NewElts.push_back(Lo); NewElts.push_back(Hi); @@ -431,7 +430,7 @@ SDValue Lo, Hi; GetExpandedOp(Val, Lo, Hi); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); SDValue Idx = N->getOperand(2); Index: lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp =================================================================== --- lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -803,7 +803,7 @@ // Place the extended lanes into the correct locations. int ExtLaneScale = NumSrcElements / NumElements; - int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0; + int EndianOffset = DAG.getDataLayout().isBigEndian() ? 
ExtLaneScale - 1 : 0; for (int i = 0; i < NumElements; ++i) ShuffleMask[i * ExtLaneScale + EndianOffset] = i; @@ -858,7 +858,7 @@ ShuffleMask.push_back(i); int ExtLaneScale = NumSrcElements / NumElements; - int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0; + int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0; for (int i = 0; i < NumElements; ++i) ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i; Index: lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp =================================================================== --- lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -742,7 +742,7 @@ // expanded pieces. if (LoVT == HiVT) { GetExpandedOp(InOp, Lo, Hi); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo); Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi); @@ -761,12 +761,12 @@ // In the general case, convert the input to an integer and split it by hand. EVT LoIntVT = EVT::getIntegerVT(*DAG.getContext(), LoVT.getSizeInBits()); EVT HiIntVT = EVT::getIntegerVT(*DAG.getContext(), HiVT.getSizeInBits()); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(LoIntVT, HiIntVT); SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo); Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi); @@ -840,7 +840,7 @@ // Store the new subvector into the specified index. SDValue SubVecPtr = GetVectorElementPointer(StackPtr, SubVecVT, Idx); Type *VecType = VecVT.getTypeForEVT(*DAG.getContext()); - unsigned Alignment = TLI.getDataLayout()->getPrefTypeAlignment(VecType); + unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType); Store = DAG.getStore(Store, dl, SubVec, SubVecPtr, MachinePointerInfo(), false, false, 0); @@ -919,8 +919,7 @@ // so use a truncating store. SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx); Type *VecType = VecVT.getTypeForEVT(*DAG.getContext()); - unsigned Alignment = - TLI.getDataLayout()->getPrefTypeAlignment(VecType); + unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType); Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT, false, false, 0); @@ -1472,7 +1471,7 @@ Lo = BitConvertToInteger(Lo); Hi = BitConvertToInteger(Hi); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -921,7 +921,7 @@ PointerType::get(Type::getInt8Ty(*getContext()), 0) : VT.getTypeForEVT(*getContext()); - return TLI->getDataLayout()->getABITypeAlignment(Ty); + return getDataLayout().getABITypeAlignment(Ty); } // EntryNode could meaningfully have debug info if we can find it... @@ -1184,7 +1184,7 @@ // EltParts is currently in little endian order. If we actually want // big-endian order then reverse it now. 
- if (TLI->isBigEndian()) + if (getDataLayout().isBigEndian()) std::reverse(EltParts.begin(), EltParts.end()); // The elements must be reversed when the element order is different @@ -1303,7 +1303,7 @@ "Cannot set target flags on target-independent globals"); // Truncate (with sign-extension) the offset value to the pointer size. - unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType()); + unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); if (BitWidth < 64) Offset = SignExtend64(Offset, BitWidth); @@ -1373,7 +1373,7 @@ assert((TargetFlags == 0 || isTarget) && "Cannot set target flags on target-independent globals"); if (Alignment == 0) - Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType()); + Alignment = getDataLayout().getPrefTypeAlignment(C->getType()); unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), None); @@ -1400,7 +1400,7 @@ assert((TargetFlags == 0 || isTarget) && "Cannot set target flags on target-independent globals"); if (Alignment == 0) - Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType()); + Alignment = getDataLayout().getPrefTypeAlignment(C->getType()); unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), None); @@ -1864,7 +1864,7 @@ unsigned ByteSize = VT.getStoreSize(); Type *Ty = VT.getTypeForEVT(*getContext()); unsigned StackAlign = - std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign); + std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false); return getFrameIndex(FrameIdx, TLI->getPointerTy()); @@ -1877,9 +1877,9 @@ VT2.getStoreSizeInBits())/8; Type *Ty1 = VT1.getTypeForEVT(*getContext()); Type *Ty2 = VT2.getTypeForEVT(*getContext()); - const DataLayout *TD = TLI->getDataLayout(); - unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1), - TD->getPrefTypeAlignment(Ty2)); + const DataLayout &DL = getDataLayout(); + unsigned Align = + std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false); @@ -3994,7 +3994,7 @@ unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size())); APInt Val(NumVTBits, 0); - if (TLI.isLittleEndian()) { + if (DAG.getDataLayout().isLittleEndian()) { for (unsigned i = 0; i != NumBytes; ++i) Val |= (uint64_t)(unsigned char)Str[i] << i*8; } else { @@ -4066,7 +4066,7 @@ if (VT == MVT::Other) { unsigned AS = 0; - if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) || + if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) || TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) { VT = TLI.getPointerTy(); } else { @@ -4185,14 +4185,14 @@ if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); - unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); + unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); // Don't promote to an alignment that would require dynamic stack // realignment. 
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); if (!TRI->needsStackRealignment(MF)) - while (NewAlign > Align && - TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign)) + while (NewAlign > Align && + DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign)) NewAlign /= 2; if (NewAlign > Align) { @@ -4294,7 +4294,7 @@ if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); - unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); + unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); if (NewAlign > Align) { // Give the stack frame object a larger alignment if needed. if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) @@ -4385,7 +4385,7 @@ if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); - unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); + unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); if (NewAlign > Align) { // Give the stack frame object a larger alignment if needed. if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) @@ -4488,7 +4488,7 @@ // Emit a library call. TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; - Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext()); + Entry.Ty = getDataLayout().getIntPtrType(*getContext()); Entry.Node = Dst; Args.push_back(Entry); Entry.Node = Src; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry); @@ -4594,7 +4594,7 @@ } // Emit a library call. - Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext()); + Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; Entry.Node = Dst; Entry.Ty = IntPtrTy; @@ -6891,10 +6891,10 @@ const GlobalValue *GV; int64_t GVOffset = 0; if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { - unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType()); + unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0); llvm::computeKnownBits(const_cast(GV), KnownZero, KnownOne, - *TLI->getDataLayout()); + getDataLayout()); unsigned AlignBits = KnownZero.countTrailingOnes(); unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; if (Align) Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -146,7 +146,7 @@ Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]); } - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi); @@ -160,7 +160,7 @@ // Combine the round and odd parts. 
Lo = Val; - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi); @@ -362,10 +362,10 @@ if (ValueVT.isVector()) return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V); - const TargetLowering &TLI = DAG.getTargetLoweringInfo(); unsigned PartBits = PartVT.getSizeInBits(); unsigned OrigNumParts = NumParts; - assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!"); + assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) && + "Copying to an illegal type!"); if (NumParts == 0) return; @@ -433,7 +433,7 @@ DAG.getIntPtrConstant(RoundBits, DL)); getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V); - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) // The odd parts were reversed by getCopyToParts - unreverse them. std::reverse(Parts + RoundParts, Parts + NumParts); @@ -468,7 +468,7 @@ } } - if (TLI.isBigEndian()) + if (DAG.getDataLayout().isBigEndian()) std::reverse(Parts, Parts + OrigNumParts); } @@ -807,7 +807,7 @@ AA = &aa; GFI = gfi; LibInfo = li; - DL = DAG.getTarget().getDataLayout(); + DL = &DAG.getDataLayout(); Context = DAG.getContext(); LPadToCallSiteMap.clear(); } @@ -1771,8 +1771,7 @@ SDValue GuardPtr = getValue(IRGuard); SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy); - unsigned Align = - TLI.getDataLayout()->getPrefTypeAlignment(IRGuard->getType()); + unsigned Align = DL->getPrefTypeAlignment(IRGuard->getType()); SDValue Guard; SDLoc dl = getCurSDLoc(); @@ -2823,10 +2822,10 @@ SDLoc dl = getCurSDLoc(); Type *Ty = I.getAllocatedType(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty); + auto &DL = DAG.getDataLayout(); + uint64_t TySize = DL.getTypeAllocSize(Ty); unsigned Align = - std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), - I.getAlignment()); + std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment()); SDValue AllocSize = getValue(I.getArraySize()); @@ -5670,9 +5669,8 @@ /// getCallOperandValEVT - Return the EVT of the Value* that this operand /// corresponds to. If there is no Value* for this operand, it returns /// MVT::Other. - EVT getCallOperandValEVT(LLVMContext &Context, - const TargetLowering &TLI, - const DataLayout *DL) const { + EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI, + const DataLayout &DL) const { if (!CallOperandVal) return MVT::Other; if (isa(CallOperandVal)) @@ -5698,7 +5696,7 @@ // If OpTy is not a single value, it may be a struct/union that we // can tile with integers. 
if (!OpTy->isSingleValueType() && OpTy->isSized()) { - unsigned BitSize = DL->getTypeSizeInBits(OpTy); + unsigned BitSize = DL.getTypeSizeInBits(OpTy); switch (BitSize) { default: break; case 1: @@ -5838,8 +5836,8 @@ SDISelAsmOperandInfoVector ConstraintOperands; const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - TargetLowering::AsmOperandInfoVector TargetConstraints = - TLI.ParseConstraints(DAG.getSubtarget().getRegisterInfo(), CS); + TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints( + DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS); bool hasMemory = false; @@ -5888,8 +5886,8 @@ OpInfo.CallOperand = getValue(OpInfo.CallOperandVal); } - OpVT = - OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, DL).getSimpleVT(); + OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, + DAG.getDataLayout()).getSimpleVT(); } OpInfo.ConstraintVT = OpVT; @@ -5983,8 +5981,9 @@ // Otherwise, create a stack slot and emit a store to it before the // asm. Type *Ty = OpVal->getType(); - uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty); - unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty); + auto &DL = DAG.getDataLayout(); + uint64_t TySize = DL.getTypeAllocSize(Ty); + unsigned Align = DL.getPrefTypeAlignment(Ty); MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy()); @@ -6380,7 +6379,7 @@ void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - const DataLayout &DL = *TLI.getDataLayout(); + const DataLayout &DL = DAG.getDataLayout(); SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurSDLoc(), getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)), @@ -6718,6 +6717,7 @@ Type *OrigRetTy = CLI.RetTy; SmallVector<EVT, 4> RetTys; SmallVector<uint64_t, 4> Offsets; + auto &DL = CLI.getDAG().getDataLayout(); ComputeValueVTs(*this, CLI.RetTy, RetTys, &Offsets); SmallVector<ISD::OutputArg, 4> Outs; @@ -6733,8 +6733,8 @@ // FIXME: equivalent assert? // assert(!CS.hasInAllocaArgument() && // "sret demotion is incompatible with inalloca"); - uint64_t TySize = getDataLayout()->getTypeAllocSize(CLI.RetTy); - unsigned Align = getDataLayout()->getPrefTypeAlignment(CLI.RetTy); + uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy); + unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy); MachineFunction &MF = CLI.DAG.getMachineFunction(); DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy); @@ -6797,7 +6797,7 @@ SDValue Op = SDValue(Args[i].Node.getNode(), Args[i].Node.getResNo() + Value); ISD::ArgFlagsTy Flags; - unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy); + unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); if (Args[i].isZExt) Flags.setZExt(); @@ -6821,7 +6821,7 @@ if (Args[i].isByVal || Args[i].isInAlloca) { PointerType *Ty = cast<PointerType>(Args[i].Ty); Type *ElementTy = Ty->getElementType(); - Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy)); + Flags.setByValSize(DL.getTypeAllocSize(ElementTy)); // For ByVal, alignment should come from FE. BE will guess if this // info is not there but there are cases it cannot get right.
unsigned FrameAlign; @@ -7030,7 +7030,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) { SelectionDAG &DAG = SDB->DAG; SDLoc dl = SDB->getCurSDLoc(); - const DataLayout *DL = TLI->getDataLayout(); + const DataLayout &DL = DAG.getDataLayout(); SmallVector<ISD::InputArg, 16> Ins; if (!FuncInfo->CanLowerReturn) { @@ -7066,7 +7066,7 @@ EVT VT = ValueVTs[Value]; Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); ISD::ArgFlagsTy Flags; - unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy); + unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt)) Flags.setZExt(); @@ -7090,7 +7090,7 @@ if (Flags.isByVal() || Flags.isInAlloca()) { PointerType *Ty = cast<PointerType>(I->getType()); Type *ElementTy = Ty->getElementType(); - Flags.setByValSize(DL->getTypeAllocSize(ElementTy)); + Flags.setByValSize(DL.getTypeAllocSize(ElementTy)); // For ByVal, alignment should be passed from FE. BE will guess if // this info is not there but there are cases it cannot get right. unsigned FrameAlign; @@ -7595,7 +7595,7 @@ bool SelectionDAGBuilder::rangeFitsInWord(const APInt &Low, const APInt &High) { // FIXME: Using the pointer type doesn't seem ideal. - uint64_t BW = DAG.getTargetLoweringInfo().getPointerTy().getSizeInBits(); + uint64_t BW = DAG.getDataLayout().getPointerSizeInBits(); uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1; return Range <= BW; } Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp =================================================================== --- lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1398,7 +1398,7 @@ APInt newMask = APInt::getLowBitsSet(maskWidth, width); for (unsigned offset=0; offset<origWidth/width; offset++) { if ((newMask & Mask) == Mask) { - if (!getDataLayout()->isLittleEndian()) + if (!DAG.getDataLayout().isLittleEndian()) bestOffset = (origWidth/width - offset - 1) * (width/8); else bestOffset = (uint64_t)offset * (width/8); @@ -2290,7 +2290,8 @@ /// If this returns an empty vector, and if the constraint string itself /// isn't empty, there was an error parsing. TargetLowering::AsmOperandInfoVector -TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI, +TargetLowering::ParseConstraints(const DataLayout &DL, + const TargetRegisterInfo *TRI, ImmutableCallSite CS) const { /// ConstraintOperands - Information about all of the constraints. AsmOperandInfoVector ConstraintOperands; @@ -2358,7 +2359,7 @@ // If OpTy is not a single value, it may be a struct/union that we // can tile with integers. if (!OpTy->isSingleValueType() && OpTy->isSized()) { - unsigned BitSize = getDataLayout()->getTypeSizeInBits(OpTy); + unsigned BitSize = DL.getTypeSizeInBits(OpTy); switch (BitSize) { default: break; case 1: @@ -2372,8 +2373,7 @@ break; } } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) { - unsigned PtrSize - = getDataLayout()->getPointerSizeInBits(PT->getAddressSpace()); + unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace()); OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize); } else { OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
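Note (not part of the patch): every hunk above follows the same mechanical pattern -- endianness and type-layout queries move off TargetLowering/TargetMachine and onto the DataLayout reachable through the SelectionDAG, which now just forwards to MachineFunction::getDataLayout(). A minimal sketch of the post-patch idiom for a hypothetical combine or lowering helper, using only accessors that appear in this patch; the helper names are illustrative, not from the tree:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

// ABI alignment for a value type; previously spelled
// TLI.getDataLayout()->getABITypeAlignment(...).
static unsigned getABIAlignForVT(SelectionDAG &DAG, EVT VT) {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  return DL.getABITypeAlignment(Ty);
}

// Endianness check; previously TLI.isBigEndian() / TLI.isLittleEndian().
static bool isBigEndianTarget(SelectionDAG &DAG) {
  return DAG.getDataLayout().isBigEndian();
}

Out-of-tree targets that override or call ParseConstraints(), or that read the DataLayout through TargetLowering, will presumably need the same mechanical update when rebasing on this change.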