llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M, ... @@
  // ignore the input and let someone else zap MDep. This handles cases like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
+ if (MDep->getLength() != M->getLength()) {
    ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
    ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
    if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
      return false;
+ }

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
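Editor's sketch (not part of the patch; the helper name is invented): the relaxed length rule above in isolation. If both memcpys use the same length Value, the sizes are trivially equal even when they are not compile-time constants; otherwise both must still be ConstantInts, with the earlier copy's length at least as large as the later one's.

```cpp
#include "llvm/IR/Constants.h"    // ConstantInt
#include "llvm/IR/Value.h"        // Value
#include "llvm/Support/Casting.h" // dyn_cast
using namespace llvm;

// Hypothetical standalone restatement of the check above.
static bool depLengthCoversCopy(Value *MDepLen, Value *MLen) {
  if (MDepLen == MLen)
    return true; // Identical SSA value: equal lengths, constant or not.
  auto *CDep = dyn_cast<ConstantInt>(MDepLen);
  auto *CM = dyn_cast<ConstantInt>(MLen);
  return CDep && CM && CDep->getZExtValue() >= CM->getZExtValue();
}
```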
@@ bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy, ... @@
  }
  eraseInstruction(MemSet);
  return true;
}

/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
-static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
+static bool hasUndefContents(Instruction *I, Value *Size) {
  if (isa<AllocaInst>(I))
    return true;

+ if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
-         if (LTSize->getZExtValue() >= Size->getZExtValue())
+         if (LTSize->getZExtValue() >= CSize->getZExtValue())
            return true;
+ }

  return false;
}
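Editor's sketch (an illustration, not from the patch; function and value names are invented): the kind of pattern the relaxed helper now covers. The copy length below is a runtime value, but the source is a freshly alloca'd, never-written buffer, so the copied bytes are undef and the memcpy becomes a candidate for removal.

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Builds roughly: void @copy_undef(i8* %dst, i64 %n) { memcpy(%dst, fresh alloca, %n) }
void buildCopyFromFreshAlloca(Module &M) {
  LLVMContext &Ctx = M.getContext();
  auto *FnTy = FunctionType::get(
      Type::getVoidTy(Ctx), {Type::getInt8PtrTy(Ctx), Type::getInt64Ty(Ctx)},
      /*isVarArg=*/false);
  Function *F =
      Function::Create(FnTy, Function::ExternalLinkage, "copy_undef", M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  Value *Dst = F->getArg(0), *N = F->getArg(1);
  // The alloca is never stored to, so its contents are undef no matter how
  // many bytes the (non-constant) length %n asks for.
  Value *Src = B.CreateAlloca(B.getInt8Ty(), B.getInt64(64), "buf");
  B.CreateMemCpy(Dst, MaybeAlign(1), Src, MaybeAlign(1), N);
  B.CreateRetVoid();
}
```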
static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
-                                MemoryDef *Def, ConstantInt *Size) {
+                                MemoryDef *Def, Value *Size) {
  if (MSSA->isLiveOnEntryDef(Def))
    return isa<AllocaInst>(getUnderlyingObject(V));

  if (IntrinsicInst *II =
          dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));
nikic: The constant size is only needed for the isMustAlias case below. The other one can work with a dynamic size.
+     if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
        if (AA->isMustAlias(V, II->getArgOperand(1)) &&
-           LTSize->getZExtValue() >= Size->getZExtValue())
+           LTSize->getZExtValue() >= CSize->getZExtValue())
          return true;
+     }

-     // If the lifetime.start covers a whole alloca (as it almost always does)
-     // and we're querying a pointer based on that alloca, then we know the
-     // memory is definitely undef, regardless of how exactly we alias. The
-     // size also doesn't matter, as an out-of-bounds access would be UB.
+     // If the lifetime.start covers a whole alloca (as it almost always
+     // does) and we're querying a pointer based on that alloca, then we know
+     // the memory is definitely undef, regardless of how exactly we alias.
+     // The size also doesn't matter, as an out-of-bounds access would be UB.
      AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
      if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
        DataLayout DL = Alloca->getModule()->getDataLayout();
        if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
          if (*AllocaSize == LTSize->getValue() * 8)
            return true;
      }
    }
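Editor's sketch tied to the review comment above (the helper name is invented): the whole-alloca branch never consults the query size at all. A lifetime.start whose constant byte count covers the entire alloca marks that memory undef for any access based on the alloca, since an out-of-bounds access would be UB anyway, so this branch keeps working when Size is a dynamic Value.

```cpp
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper mirroring the whole-alloca branch above.
static bool lifetimeCoversWholeAlloca(AllocaInst *Alloca, ConstantInt *LTSize,
                                      const DataLayout &DL) {
  if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
    return *AllocaSize == LTSize->getValue() * 8; // bytes -> bits
  return false; // Unknown allocation size (e.g. a variable array size).
}
```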
@@ ... @@
/// memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
/// memset(dst1, c, dst1_size);
/// memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
-///
-/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure that memcpy(..., memset(...), ...), that is we are memsetting and
  // memcpying from the same address. Otherwise it is hard to reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

- // A known memset size is required.
- ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
- if (!MemSetSize)
-   return false;
+ Value *MemSetSize = MemSet->getLength();
+ Value *CopySize = MemCpy->getLength();
+ if (MemSetSize != CopySize) {
    // Make sure the memcpy doesn't read any more than what the memset wrote.
    // Don't worry about sizes larger than i64.
- ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
- if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
+   // A known memset size is required.
+   ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
+   if (!CMemSetSize)
+     return false;
+   // A known memcpy size is also required.
+   ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize);
+   if (!CCopySize)
+     return false;
+   if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
      // If the memcpy is larger than the memset, but the memory was undef prior
      // to the memset, we can just ignore the tail. Technically we're only
      // interested in the bytes from MemSetSize..CopySize here, but as we can't
      // easily represent this location, we use the full 0..CopySize range.
      MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
      bool CanReduceSize = false;
      if (EnableMemorySSA) {
        MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
        MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
            MemSetAccess->getDefiningAccess(), MemCpyLoc);
        if (auto *MD = dyn_cast<MemoryDef>(Clobber))
          if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize))
            CanReduceSize = true;
      } else {
        MemDepResult DepInfo = MD->getPointerDependencyFrom(
            MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
        if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
          CanReduceSize = true;
      }

      if (!CanReduceSize)
        return false;
      CopySize = MemSetSize;
    }
+ }
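Editor's sketch (invented helper name; a simplified restatement of the size handling above): when the two lengths are the same Value the fold is always possible; when they differ, both must be constants, and a copy longer than the memset is only acceptable if the tail was undef, in which case the replacement memset is clamped to the memset's length.

```cpp
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Returns the length the replacement memset should use, or None to give up.
static Optional<Value *> chooseMemSetLength(Value *MemSetSize, Value *CopySize,
                                            bool TailIsUndef) {
  if (MemSetSize == CopySize)
    return CopySize; // Same length Value, constant or not.
  auto *CSet = dyn_cast<ConstantInt>(MemSetSize);
  auto *CCopy = dyn_cast<ConstantInt>(CopySize);
  if (!CSet || !CCopy)
    return None; // Different, non-constant lengths: cannot compare.
  if (CCopy->getZExtValue() <= CSet->getZExtValue())
    return CopySize; // The memcpy reads only bytes the memset wrote.
  if (TailIsUndef)
    return MemSetSize; // Clamp: the bytes past the memset were undef anyway.
  return None;
}
```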
  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  if (MSSAU) {
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
@@ if (EnableMemorySSA) { ... @@
    // The memcpy most post-dom the memset, so limit this to the same basic
    // block. A non-local generalization is likely not worthwhile.
    if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
      if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
        if (DestClobber->getBlock() == M->getParent())
          if (processMemSetMemCpyDependence(M, MDep))
            return true;

-   // The optimizations after this point require the memcpy size.
-   ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
-   if (!CopySize) return false;

    MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
        AnyClobber, MemoryLocation::getForSource(M));

    // There are four possible optimizations we can do for memcpy:
    // a) memcpy-memcpy xform which exposes redundance for DSE.
    // b) call-memcpy xform for return slot optimization.
    // c) memcpy from freshly alloca'd space or space that has just started
    //    its lifetime copies undefined data, and we can therefore eliminate
    //    the memcpy in favor of the data that was already at the destination.
    // d) memcpy from a just-memset'd source can be turned into memset.
    if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
      if (Instruction *MI = MD->getMemoryInst()) {
+       if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
          if (auto *C = dyn_cast<CallInst>(MI)) {
-           // The memcpy must post-dom the call. Limit to the same block for now.
-           // Additionally, we need to ensure that there are no accesses to dest
-           // between the call and the memcpy. Accesses to src will be checked
-           // by performCallSlotOptzn().
+           // The memcpy must post-dom the call. Limit to the same block for
+           // now. Additionally, we need to ensure that there are no accesses
+           // to dest between the call and the memcpy. Accesses to src will be
+           // checked by performCallSlotOptzn().
            // TODO: Support non-local call-slot optimization?
            if (C->getParent() == M->getParent() &&
                !accessedBetween(*AA, DestLoc, MD, MA)) {
              // FIXME: Can we pass in either of dest/src alignment here instead
              // of conservatively taking the minimum?
              Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                         M->getSourceAlign().valueOrOne());
              if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
-                                      CopySize->getZExtValue(), Alignment, C)) {
+                                      CopySize->getZExtValue(), Alignment,
+                                      C)) {
                LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
                                  << " call: " << *C << "\n"
                                  << " memcpy: " << *M << "\n");
                eraseInstruction(M);
                ++NumMemCpyInstr;
                return true;
              }
nikic: I'd move this check a bit higher (accessedBetween is more expensive).
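Editor's note on the comment above: accessedBetween() walks the memory accesses between the call and the memcpy, while the block and constant-size tests are O(1), so running the cheap guards first skips the walk whenever the transform cannot apply anyway. A generic stand-in for that ordering (not MemCpyOpt code; all names invented):

```cpp
#include <cstdint>
#include <vector>

struct Access { uint64_t Addr, Size; };

// Stand-in for the expensive query: scans every recorded access for overlap.
static bool anyAccessOverlaps(const std::vector<Access> &Accesses,
                              uint64_t Addr, uint64_t Size) {
  for (const Access &A : Accesses)
    if (A.Addr < Addr + Size && Addr < A.Addr + A.Size)
      return true;
  return false;
}

static bool canApplyTransform(bool HasConstantLength, bool SameBlock,
                              const std::vector<Access> &AccessesBetween,
                              uint64_t DstAddr, uint64_t DstSize) {
  if (!HasConstantLength) // cheap guard first
    return false;
  if (!SameBlock)         // cheap guard
    return false;
  // Only now pay for the linear scan.
  return !anyAccessOverlaps(AccessesBetween, DstAddr, DstSize);
}
```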
            }
          }
+       }
        if (auto *MDep = dyn_cast<MemCpyInst>(MI))
          return processMemCpyMemCpyDependence(M, MDep);
        if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
          if (performMemCpyToMemSetOptzn(M, MDep)) {
            LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
            eraseInstruction(M);
            ++NumCpyToSet;
            return true;
          }
        }
      }

-     if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, CopySize)) {
+     if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, M->getLength())) {
        LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }
  } else {
    MemDepResult DepInfo = MD->getDependency(M);

    // Try to turn a partially redundant memset + memcpy into
    // memcpy + smaller memset. We don't need the memcpy size for this.
    if (DepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
        if (processMemSetMemCpyDependence(M, MDep))
          return true;

-   // The optimizations after this point require the memcpy size.
-   ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
-   if (!CopySize) return false;
    // There are four possible optimizations we can do for memcpy:
    // a) memcpy-memcpy xform which exposes redundance for DSE.
    // b) call-memcpy xform for return slot optimization.
    // c) memcpy from freshly alloca'd space or space that has just started
    //    its lifetime copies undefined data, and we can therefore eliminate
    //    the memcpy in favor of the data that was already at the destination.
    // d) memcpy from a just-memset'd source can be turned into memset.
+   if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
      if (DepInfo.isClobber()) {
        if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
          // FIXME: Can we pass in either of dest/src alignment here instead
          // of conservatively taking the minimum?
          Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                     M->getSourceAlign().valueOrOne());
          if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                   CopySize->getZExtValue(), Alignment, C)) {
            eraseInstruction(M);
            ++NumMemCpyInstr;
            return true;
          }
        }
      }
+   }
    MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
    MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
        SrcLoc, true, M->getIterator(), M->getParent());
    if (SrcDepInfo.isClobber()) {
      if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
        return processMemCpyMemCpyDependence(M, MDep);
    } else if (SrcDepInfo.isDef()) {
-     if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
+     if (hasUndefContents(SrcDepInfo.getInst(), M->getLength())) {
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }

    if (SrcDepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
@@ ... @@
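Editorial summary sketch (invented helper name): the net effect of the patch is that processMemCpy no longer rejects memcpys with non-constant lengths up front. The memcpy-memcpy, memcpy-memset, and copy-from-undef transforms now take the length as a plain Value, and only the call-slot optimization, whose interface needs a concrete byte count, still asks for a ConstantInt, locally rather than as an early bail-out.

```cpp
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Fetch the byte count only for the one transform that actually needs it.
static Optional<uint64_t> getConstantLength(MemCpyInst *M) {
  if (auto *C = dyn_cast<ConstantInt>(M->getLength()))
    return C->getZExtValue();
  return None; // Non-constant length: size-agnostic transforms still apply.
}
```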