diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h --- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h @@ -19,6 +19,7 @@ #include "llvm/CodeGen/LowLevelType.h" #include "llvm/CodeGen/Register.h" +#include "llvm/Support/Alignment.h" namespace llvm { @@ -189,13 +190,13 @@ private: // Memcpy family optimization helpers. bool optimizeMemcpy(MachineInstr &MI, Register Dst, Register Src, - unsigned KnownLen, unsigned DstAlign, unsigned SrcAlign, + unsigned KnownLen, Align DstAlign, Align SrcAlign, bool IsVolatile); bool optimizeMemmove(MachineInstr &MI, Register Dst, Register Src, - unsigned KnownLen, unsigned DstAlign, unsigned SrcAlign, - bool IsVolatile); + unsigned KnownLen, Align DstAlign, Align SrcAlign, + bool IsVolatile); bool optimizeMemset(MachineInstr &MI, Register Dst, Register Val, - unsigned KnownLen, unsigned DstAlign, bool IsVolatile); + unsigned KnownLen, Align DstAlign, bool IsVolatile); /// Given a non-indexed load or store instruction \p MI, find an offset that /// can be usefully and legally folded into it as a post-indexing operation. diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h --- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h +++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h @@ -461,13 +461,22 @@ } /// Return the alignment of the specified stack object. + /// FIXME: Remove this function once transition to Align is over. unsigned getObjectAlignment(int ObjectIdx) const { assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && "Invalid Object Idx!"); return Objects[ObjectIdx + NumFixedObjects].Alignment.value(); } + /// Return the alignment of the specified stack object. 
+ Align getObjectAlign(int ObjectIdx) const { + assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + return Objects[ObjectIdx + NumFixedObjects].Alignment; + } + /// setObjectAlignment - Change the alignment of the specified stack object. + /// FIXME: Remove this function once transition to Align is over. void setObjectAlignment(int ObjectIdx, unsigned Align) { assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && "Invalid Object Idx!"); @@ -478,6 +487,17 @@ ensureMaxAlignment(Align); } + /// setObjectAlignment - Change the alignment of the specified stack object. + void setObjectAlignment(int ObjectIdx, Align Alignment) { + assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + Objects[ObjectIdx + NumFixedObjects].Alignment = Alignment; + + // Only ensure max alignment for the default stack. + if (getStackID(ObjectIdx) == 0) + ensureMaxAlignment(Alignment); + } + /// Return the underlying Alloca of the specified /// stack object if it exists. Returns 0 if none exists. const AllocaInst* getObjectAllocation(int ObjectIdx) const { diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -110,7 +110,7 @@ struct MemOp { // Shared uint64_t Size; - unsigned DstAlign; // Specified alignment of the memory operation or zero if + uint64_t DstAlign; // Specified alignment of the memory operation or zero if // destination alignment can satisfy any constraint. bool AllowOverlap; // memset only @@ -119,30 +119,27 @@ // memcpy only bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register // constant so it does not need to be loaded. - unsigned SrcAlign; // Inferred alignment of the source or zero if the memory + uint64_t SrcAlign; // Inferred alignment of the source or zero if the memory // operation does not need to load the value. 
- static MemOp Copy(uint64_t Size, bool DstAlignCanChange, unsigned DstAlign, - unsigned SrcAlign, bool IsVolatile, + static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, + Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc = false) { - assert(DstAlign && "Destination alignment should be set"); - assert(SrcAlign && "Source alignment should be set"); return { /*.Size =*/Size, - /*.DstAlign =*/DstAlignCanChange ? 0 : DstAlign, + /*.DstAlign =*/DstAlignCanChange ? 0 : DstAlign.value(), /*.AllowOverlap =*/!IsVolatile, /*.IsMemset =*/false, /*.ZeroMemset =*/false, /*.MemcpyStrSrc =*/MemcpyStrSrc, - /*.SrcAlign =*/SrcAlign, + /*.SrcAlign =*/SrcAlign.value(), }; } - static MemOp Set(uint64_t Size, bool DstAlignCanChange, unsigned DstAlign, + static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile) { - assert(DstAlign && "Destination alignment should be set"); return { /*.Size =*/Size, - /*.DstAlign =*/DstAlignCanChange ? 0 : DstAlign, + /*.DstAlign =*/DstAlignCanChange ? 
0 : DstAlign.value(), /*.AllowOverlap =*/!IsVolatile, /*.IsMemset =*/true, /*.ZeroMemset =*/IsZeroMemset, diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -954,9 +954,9 @@ return Val; } -bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val, - unsigned KnownLen, unsigned Align, - bool IsVolatile) { +bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, + Register Val, unsigned KnownLen, + Align Alignment, bool IsVolatile) { auto &MF = *MI.getParent()->getParent(); const auto &TLI = *MF.getSubtarget().getTargetLowering(); auto &DL = MF.getDataLayout(); @@ -983,7 +983,7 @@ if (!findGISelOptimalMemOpLowering(MemOps, Limit, MemOp::Set(KnownLen, DstAlignCanChange, - Align, + Alignment, /*IsZeroMemset=*/IsZeroVal, /*IsVolatile=*/IsVolatile), DstPtrInfo.getAddrSpace(), ~0u, @@ -993,13 +993,13 @@ if (DstAlignCanChange) { // Get an estimate of the type from the LLT. Type *IRTy = getTypeForLLT(MemOps[0], C); - unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy); - if (NewAlign > Align) { - Align = NewAlign; + Align NewAlign(DL.getABITypeAlignment(IRTy)); + if (NewAlign > Alignment) { + Alignment = NewAlign; unsigned FI = FIDef->getOperand(1).getIndex(); // Give the stack frame object a larger alignment if needed. 
- if (MFI.getObjectAlignment(FI) < Align) - MFI.setObjectAlignment(FI, Align); + if (MFI.getObjectAlign(FI) < Alignment) + MFI.setObjectAlignment(FI, Alignment); } } @@ -1067,10 +1067,9 @@ return true; } - bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst, Register Src, unsigned KnownLen, - unsigned DstAlign, unsigned SrcAlign, + Align DstAlign, Align SrcAlign, bool IsVolatile) { auto &MF = *MI.getParent()->getParent(); const auto &TLI = *MF.getSubtarget().getTargetLowering(); @@ -1082,7 +1081,7 @@ bool DstAlignCanChange = false; MachineFrameInfo &MFI = MF.getFrameInfo(); bool OptSize = shouldLowerMemFuncForSize(MF); - unsigned Alignment = MinAlign(DstAlign, SrcAlign); + Align Alignment = commonAlignment(DstAlign, SrcAlign); MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) @@ -1111,21 +1110,20 @@ if (DstAlignCanChange) { // Get an estimate of the type from the LLT. Type *IRTy = getTypeForLLT(MemOps[0], C); - unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy); + Align NewAlign(DL.getABITypeAlignment(IRTy)); // Don't promote to an alignment that would require dynamic stack // realignment. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); if (!TRI->needsStackRealignment(MF)) - while (NewAlign > Alignment && - DL.exceedsNaturalStackAlignment(Align(NewAlign))) - NewAlign /= 2; + while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) + NewAlign = NewAlign / 2; if (NewAlign > Alignment) { Alignment = NewAlign; unsigned FI = FIDef->getOperand(1).getIndex(); // Give the stack frame object a larger alignment if needed. 
- if (MFI.getObjectAlignment(FI) < Alignment) + if (MFI.getObjectAlign(FI) < Alignment) MFI.setObjectAlignment(FI, Alignment); } } @@ -1176,9 +1174,9 @@ } bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst, - Register Src, unsigned KnownLen, - unsigned DstAlign, unsigned SrcAlign, - bool IsVolatile) { + Register Src, unsigned KnownLen, + Align DstAlign, Align SrcAlign, + bool IsVolatile) { auto &MF = *MI.getParent()->getParent(); const auto &TLI = *MF.getSubtarget().getTargetLowering(); auto &DL = MF.getDataLayout(); @@ -1189,7 +1187,7 @@ bool DstAlignCanChange = false; MachineFrameInfo &MFI = MF.getFrameInfo(); bool OptSize = shouldLowerMemFuncForSize(MF); - unsigned Alignment = MinAlign(DstAlign, SrcAlign); + Align Alignment = commonAlignment(DstAlign, SrcAlign); MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) @@ -1217,21 +1215,20 @@ if (DstAlignCanChange) { // Get an estimate of the type from the LLT. Type *IRTy = getTypeForLLT(MemOps[0], C); - unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy); + Align NewAlign(DL.getABITypeAlignment(IRTy)); // Don't promote to an alignment that would require dynamic stack // realignment. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); if (!TRI->needsStackRealignment(MF)) - while (NewAlign > Alignment && - DL.exceedsNaturalStackAlignment(Align(NewAlign))) - NewAlign /= 2; + while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) + NewAlign = NewAlign / 2; if (NewAlign > Alignment) { Alignment = NewAlign; unsigned FI = FIDef->getOperand(1).getIndex(); // Give the stack frame object a larger alignment if needed. 
- if (MFI.getObjectAlignment(FI) < Alignment) + if (MFI.getObjectAlign(FI) < Alignment) MFI.setObjectAlignment(FI, Alignment); } } @@ -1297,8 +1294,8 @@ if (IsVolatile) return false; - unsigned DstAlign = MemOp->getBaseAlignment(); - unsigned SrcAlign = 0; + Align DstAlign(MemOp->getBaseAlignment()); + Align SrcAlign; Register Dst = MI.getOperand(1).getReg(); Register Src = MI.getOperand(2).getReg(); Register Len = MI.getOperand(3).getReg(); @@ -1306,7 +1303,7 @@ if (ID != Intrinsic::memset) { assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI"); MemOp = *(++MMOIt); - SrcAlign = MemOp->getBaseAlignment(); + SrcAlign = Align(MemOp->getBaseAlignment()); } // See if this is a constant length copy diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -5877,7 +5877,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, - uint64_t Size, unsigned Alignment, + uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) { @@ -5901,9 +5901,10 @@ FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; - unsigned SrcAlign = DAG.InferPtrAlignment(Src); - if (Alignment > SrcAlign) + MaybeAlign SrcAlign(DAG.InferPtrAlignment(Src)); + if (!SrcAlign || Alignment > *SrcAlign) SrcAlign = Alignment; + assert(SrcAlign && "SrcAlign must be set"); ConstantDataArraySlice Slice; bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; @@ -5912,7 +5913,7 @@ ? 
MemOp::Set(Size, DstAlignCanChange, Alignment, /*IsZeroMemset*/ true, isVol) : MemOp::Copy(Size, DstAlignCanChange, Alignment, - SrcAlign, isVol, CopyFromConstant); + *SrcAlign, isVol, CopyFromConstant); if (!TLI.findOptimalMemOpLowering( MemOps, Limit, Op, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) @@ -5920,19 +5921,18 @@ if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(C); - unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); + Align NewAlign(DL.getABITypeAlignment(Ty)); // Don't promote to an alignment that would require dynamic stack // realignment. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); if (!TRI->needsStackRealignment(MF)) - while (NewAlign > Alignment && - DL.exceedsNaturalStackAlignment(Align(NewAlign))) - NewAlign /= 2; + while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) + NewAlign = NewAlign / 2; if (NewAlign > Alignment) { // Give the stack frame object a larger alignment if needed. 
- if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) + if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) MFI.setObjectAlignment(FI->getIndex(), NewAlign); Alignment = NewAlign; } } @@ -5979,7 +5979,7 @@ if (Value.getNode()) { Store = DAG.getStore( Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), - DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags); + DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); OutChains.push_back(Store); } } @@ -6002,12 +6002,13 @@ Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), SrcPtrInfo.getWithOffset(SrcOff), VT, - MinAlign(SrcAlign, SrcOff), SrcMMOFlags); + commonAlignment(*SrcAlign, SrcOff).value(), + SrcMMOFlags); OutLoadChains.push_back(Value.getValue(1)); Store = DAG.getTruncStore( Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), - DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags); + DstPtrInfo.getWithOffset(DstOff), VT, Alignment.value(), MMOFlags); OutStoreChains.push_back(Store); } SrcOff += VTSize; @@ -6063,7 +6064,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, - uint64_t Size, unsigned Align, + uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) { @@ -6085,13 +6086,14 @@ FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; - unsigned SrcAlign = DAG.InferPtrAlignment(Src); - if (Align > SrcAlign) - SrcAlign = Align; + MaybeAlign SrcAlign(DAG.InferPtrAlignment(Src)); + if (!SrcAlign || Alignment > *SrcAlign) + SrcAlign = Alignment; + assert(SrcAlign && "SrcAlign must be set"); unsigned Limit = AlwaysInline ? 
~0U : TLI.getMaxStoresPerMemmove(OptSize); if (!TLI.findOptimalMemOpLowering( MemOps, Limit, - MemOp::Copy(Size, DstAlignCanChange, Align, SrcAlign, + MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign, /*IsVolatile*/ true), DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) @@ -6099,12 +6101,12 @@ if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(C); - unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); - if (NewAlign > Align) { + Align NewAlign(DL.getABITypeAlignment(Ty)); + if (NewAlign > Alignment) { // Give the stack frame object a larger alignment if needed. - if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) + if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) MFI.setObjectAlignment(FI->getIndex(), NewAlign); - Align = NewAlign; + Alignment = NewAlign; } } @@ -6126,9 +6128,9 @@ if (isDereferenceable) SrcMMOFlags |= MachineMemOperand::MODereferenceable; - Value = - DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), - SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags); + Value = DAG.getLoad( + VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), + SrcPtrInfo.getWithOffset(SrcOff), SrcAlign->value(), SrcMMOFlags); LoadValues.push_back(Value); LoadChains.push_back(Value.getValue(1)); SrcOff += VTSize; @@ -6140,9 +6142,9 @@ unsigned VTSize = VT.getSizeInBits() / 8; SDValue Store; - Store = DAG.getStore(Chain, dl, LoadValues[i], - DAG.getMemBasePlusOffset(Dst, DstOff, dl), - DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); + Store = DAG.getStore( + Chain, dl, LoadValues[i], DAG.getMemBasePlusOffset(Dst, DstOff, dl), + DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); OutChains.push_back(Store); DstOff += VTSize; } @@ -6159,7 +6161,7 @@ /// \param Dst Pointer to destination memory location. /// \param Src Value of byte to write into the memory. /// \param Size Number of bytes to write. -/// \param Align Alignment of the destination in bytes. 
+/// \param Alignment Alignment of the destination in bytes. /// \param isVol True if destination is volatile. /// \param DstPtrInfo IR information on the memory pointer. /// \returns New head in the control flow, if lowering was successful, empty @@ -6170,7 +6172,7 @@ /// memory size. static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, - uint64_t Size, unsigned Align, bool isVol, + uint64_t Size, Align Alignment, bool isVol, MachinePointerInfo DstPtrInfo) { // Turn a memset of undef to nop. // FIXME: We need to honor volatile even is Src is undef. @@ -6192,18 +6194,18 @@ isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); if (!TLI.findOptimalMemOpLowering( MemOps, TLI.getMaxStoresPerMemset(OptSize), - MemOp::Set(Size, DstAlignCanChange, Align, IsZeroVal, isVol), + MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol), DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes())) return SDValue(); if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); - unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); - if (NewAlign > Align) { + Align NewAlign(DAG.getDataLayout().getABITypeAlignment(Ty)); + if (NewAlign > Alignment) { // Give the stack frame object a larger alignment if needed. - if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) + if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) MFI.setObjectAlignment(FI->getIndex(), NewAlign); - Align = NewAlign; + Alignment = NewAlign; } } @@ -6241,7 +6243,7 @@ assert(Value.getValueType() == VT && "Value with wrong type."); SDValue Store = DAG.getStore( Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), - DstPtrInfo.getWithOffset(DstOff), Align, + DstPtrInfo.getWithOffset(DstOff), Alignment.value(), isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); OutChains.push_back(Store); DstOff += VT.getSizeInBits() / 8; @@ -6262,11 +6264,12 @@ } SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, - SDValue Src, SDValue Size, unsigned Align, + SDValue Src, SDValue Size, unsigned Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) { - assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); + assert(Alignment && + "The SDAG layer expects explicit alignment and reserves 0"); // Check to see if we should lower the memcpy to loads and stores first. // For cases within the target-specified limits, this is the best choice. @@ -6276,9 +6279,9 @@ if (ConstantSize->isNullValue()) return Chain; - SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, - ConstantSize->getZExtValue(),Align, - isVol, false, DstPtrInfo, SrcPtrInfo); + SDValue Result = getMemcpyLoadsAndStores( + *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), + Align(Alignment), isVol, false, DstPtrInfo, SrcPtrInfo); if (Result.getNode()) return Result; } @@ -6287,7 +6290,7 @@ // code. If the target chooses to do this, this is the next best. if (TSI) { SDValue Result = TSI->EmitTargetCodeForMemcpy( - *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, + *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, DstPtrInfo, SrcPtrInfo); if (Result.getNode()) return Result; @@ -6297,9 +6300,9 @@ // use a (potentially long) sequence of loads and stores. 
if (AlwaysInline) { assert(ConstantSize && "AlwaysInline requires a constant size!"); - return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, - ConstantSize->getZExtValue(), Align, isVol, - true, DstPtrInfo, SrcPtrInfo); + return getMemcpyLoadsAndStores( + *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), + Align(Alignment), isVol, true, DstPtrInfo, SrcPtrInfo); } checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); @@ -6378,11 +6381,12 @@ } SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, - SDValue Src, SDValue Size, unsigned Align, + SDValue Src, SDValue Size, unsigned Alignment, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) { - assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); + assert(Alignment && + "The SDAG layer expects explicit alignment and reserves 0"); // Check to see if we should lower the memmove to loads and stores first. // For cases within the target-specified limits, this is the best choice. @@ -6392,10 +6396,9 @@ if (ConstantSize->isNullValue()) return Chain; - SDValue Result = - getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, - ConstantSize->getZExtValue(), Align, isVol, - false, DstPtrInfo, SrcPtrInfo); + SDValue Result = getMemmoveLoadsAndStores( + *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), + Align(Alignment), isVol, false, DstPtrInfo, SrcPtrInfo); if (Result.getNode()) return Result; } @@ -6403,8 +6406,9 @@ // Then check to see if we should lower the memmove with target-specific // code. If the target chooses to do this, this is the next best. 
if (TSI) { - SDValue Result = TSI->EmitTargetCodeForMemmove( - *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); + SDValue Result = + TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, + Alignment, isVol, DstPtrInfo, SrcPtrInfo); if (Result.getNode()) return Result; } @@ -6482,10 +6486,11 @@ } SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, - SDValue Src, SDValue Size, unsigned Align, + SDValue Src, SDValue Size, unsigned Alignment, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo) { - assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); + assert(Alignment && + "The SDAG layer expects explicit alignment and reserves 0"); // Check to see if we should lower the memset to stores first. // For cases within the target-specified limits, this is the best choice. @@ -6495,9 +6500,9 @@ if (ConstantSize->isNullValue()) return Chain; - SDValue Result = - getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), - Align, isVol, DstPtrInfo); + SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, + ConstantSize->getZExtValue(), + Align(Alignment), isVol, DstPtrInfo); if (Result.getNode()) return Result; @@ -6507,7 +6512,7 @@ // code. If the target chooses to do this, this is the next best. 
if (TSI) { SDValue Result = TSI->EmitTargetCodeForMemset( - *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); + *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo); if (Result.getNode()) return Result; } diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -566,8 +566,8 @@ return LibCallCost; const unsigned Size = C->getValue().getZExtValue(); - const unsigned DstAlign = MI->getDestAlignment(); - const unsigned SrcAlign = MI->getSourceAlignment(); + const Align DstAlign = *MI->getDestAlign(); + const Align SrcAlign = *MI->getSourceAlign(); const Function *F = I->getParent()->getParent(); const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize()); std::vector<EVT> MemOps;