diff --git a/lld/MachO/ConcatOutputSection.cpp b/lld/MachO/ConcatOutputSection.cpp
--- a/lld/MachO/ConcatOutputSection.cpp
+++ b/lld/MachO/ConcatOutputSection.cpp
@@ -126,7 +126,7 @@
     return false;
   uint64_t isecAddr = addr;
   for (ConcatInputSection *isec : inputs)
-    isecAddr = alignTo(isecAddr, isec->align) + isec->getSize();
+    isecAddr = alignToPowerOf2(isecAddr, isec->align) + isec->getSize();
   if (isecAddr - addr + in.stubs->getSize() <=
       std::min(target->backwardBranchRange, target->forwardBranchRange))
     return false;
@@ -172,7 +172,7 @@
   uint64_t isecEnd = isecVA;
   for (size_t i = callIdx; i < inputs.size(); i++) {
     InputSection *isec = inputs[i];
-    isecEnd = alignTo(isecEnd, isec->align) + isec->getSize();
+    isecEnd = alignToPowerOf2(isecEnd, isec->align) + isec->getSize();
   }
   // Estimate the address after which call sites can safely call stubs
   // directly rather than through intermediary thunks.
@@ -194,8 +194,8 @@
 }
 
 void ConcatOutputSection::finalizeOne(ConcatInputSection *isec) {
-  size = alignTo(size, isec->align);
-  fileSize = alignTo(fileSize, isec->align);
+  size = alignToPowerOf2(size, isec->align);
+  fileSize = alignToPowerOf2(fileSize, isec->align);
   isec->outSecOff = size;
   isec->isFinal = true;
   size += isec->getSize();
@@ -248,8 +248,9 @@
     // grows. So leave room for a bunch of thunks.
     unsigned slop = 256 * thunkSize;
     while (finalIdx < endIdx) {
-      uint64_t expectedNewSize = alignTo(addr + size, inputs[finalIdx]->align) +
-                                 inputs[finalIdx]->getSize();
+      uint64_t expectedNewSize =
+          alignToPowerOf2(addr + size, inputs[finalIdx]->align) +
+          inputs[finalIdx]->getSize();
       if (expectedNewSize >= isecVA + forwardBranchRange - slop)
         break;
       finalizeOne(inputs[finalIdx++]);
diff --git a/lld/MachO/SyntheticSections.cpp b/lld/MachO/SyntheticSections.cpp
--- a/lld/MachO/SyntheticSections.cpp
+++ b/lld/MachO/SyntheticSections.cpp
@@ -95,7 +95,7 @@
   // If we are emitting an encryptable binary, our load commands must have a
   // separate (non-encrypted) page to themselves.
   if (config->emitEncryptionInfo)
-    size = alignTo(size, target->getPageSize());
+    size = alignToPowerOf2(size, target->getPageSize());
   return size;
 }
 
@@ -1642,7 +1642,7 @@
       // handled.
       uint32_t pieceAlign = 1
                             << llvm::countr_zero(isec->align | piece.inSecOff);
-      offset = alignTo(offset, pieceAlign);
+      offset = alignToPowerOf2(offset, pieceAlign);
       piece.outSecOff = offset;
       isec->isFinal = true;
       StringRef string = isec->getStringRef(i);
@@ -1717,7 +1717,8 @@
       assert(it != stringOffsetMap.end());
       StringOffset &offsetInfo = it->second;
       if (offsetInfo.outSecOff == UINT64_MAX) {
-        offsetInfo.outSecOff = alignTo(size, 1ULL << offsetInfo.trailingZeros);
+        offsetInfo.outSecOff =
+            alignToPowerOf2(size, 1ULL << offsetInfo.trailingZeros);
         size = offsetInfo.outSecOff + s.size() + 1; // account for null terminator
       }
diff --git a/lld/MachO/Writer.cpp b/lld/MachO/Writer.cpp
--- a/lld/MachO/Writer.cpp
+++ b/lld/MachO/Writer.cpp
@@ -1077,9 +1077,10 @@
     seg->addr = addr;
     assignAddresses(seg);
     // codesign / libstuff checks for segment ordering by verifying that
-    // `fileOff + fileSize == next segment fileOff`. So we call alignTo() before
-    // (instead of after) computing fileSize to ensure that the segments are
-    // contiguous. We handle addr / vmSize similarly for the same reason.
+    // `fileOff + fileSize == next segment fileOff`. So we call
+    // alignToPowerOf2() before (instead of after) computing fileSize to ensure
+    // that the segments are contiguous. We handle addr / vmSize similarly for
+    // the same reason.
     fileOff = alignToPowerOf2(fileOff, pageSize);
     addr = alignToPowerOf2(addr, pageSize);
     seg->vmSize = addr - seg->addr;
@@ -1122,8 +1123,8 @@
   for (OutputSection *osec : seg->getSections()) {
     if (!osec->isNeeded())
       continue;
-    addr = alignTo(addr, osec->align);
-    fileOff = alignTo(fileOff, osec->align);
+    addr = alignToPowerOf2(addr, osec->align);
+    fileOff = alignToPowerOf2(fileOff, osec->align);
     osec->addr = addr;
     osec->fileOff = isZeroFill(osec->flags) ? 0 : fileOff;
     osec->finalize();
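
Note (editor's addition, not part of the patch): the substitution is mechanical, but it is only valid because every alignment passed to these calls is a power of two. Mach-O section headers store alignment as a log2 exponent, so the expanded `align` values lld works with (1 << p2align) satisfy that invariant by construction, and `llvm::alignToPowerOf2()` can round up with a single add-and-mask where `llvm::alignTo()` must divide to handle arbitrary alignments. The standalone sketch below re-implements both helpers in reduced form to show the equivalence; the bodies are modeled on llvm/include/llvm/Support/MathExtras.h but simplified (local `*Sketch` names, no overflow handling near UINT64_MAX), so treat them as illustrative rather than authoritative.

// align_sketch.cpp -- reduced re-implementations, for illustration only.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// General case: rounds up to any non-zero alignment using a division.
static uint64_t alignToSketch(uint64_t value, uint64_t align) {
  assert(align != 0 && "alignment must be non-zero");
  return (value + align - 1) / align * align;
}

// Power-of-two case: a single add-and-mask, no division. ~align + 1 is
// -align spelled without applying unary minus to an unsigned type.
static uint64_t alignToPowerOf2Sketch(uint64_t value, uint64_t align) {
  assert(align != 0 && (align & (align - 1)) == 0 &&
         "alignment must be a power of 2");
  return (value + align - 1) & (~align + 1);
}

int main() {
  // Spot-check that the two rounding strategies agree whenever the
  // alignment is a power of two, which is the case for every call site
  // this patch touches.
  for (uint64_t align : {1ULL, 8ULL, 4096ULL})
    for (uint64_t value : {0ULL, 1ULL, 4095ULL, 4096ULL, 4097ULL})
      assert(alignToSketch(value, align) ==
             alignToPowerOf2Sketch(value, align));
  puts("both rounding strategies agree on power-of-two alignments");
  return 0;
}

Since `align` is a run-time value here, the compiler cannot strength-reduce the division in the general helper on its own, which is presumably the practical motivation for preferring the power-of-two variant on these address-assignment paths.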