//===- Writer.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Writer.h"
#include "ConcatOutputSection.h"
#include "Config.h"
#include "InputFiles.h"
#include "InputSection.h"
#include "MapFile.h"
#include "OutputSection.h"
#include "OutputSegment.h"
#include "SectionPriorities.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "UnwindInfoSection.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/CommonLinkerContext.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/xxhash.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::sys;
using namespace lld;
using namespace lld::macho;
namespace {
class LCUuid;
class Writer {
public:
Writer() : buffer(errorHandler().outputBuffer) {}
void treatSpecialUndefineds();
void scanRelocations();
void scanSymbols();
template <class LP> void createOutputSections();
template <class LP> void createLoadCommands();
void finalizeAddresses();
void finalizeLinkEditSegment();
void assignAddresses(OutputSegment *);
void openFile();
void writeSections();
void applyOptimizationHints();
void buildFixupChains();
void writeUuid();
void writeCodeSignature();
void writeOutputFile();
template <class LP> void run();
ThreadPool threadPool;
std::unique_ptr<FileOutputBuffer> &buffer;
uint64_t addr = 0;
uint64_t fileOff = 0;
MachHeaderSection *header = nullptr;
StringTableSection *stringTableSection = nullptr;
SymtabSection *symtabSection = nullptr;
IndirectSymtabSection *indirectSymtabSection = nullptr;
CodeSignatureSection *codeSignatureSection = nullptr;
DataInCodeSection *dataInCodeSection = nullptr;
FunctionStartsSection *functionStartsSection = nullptr;
LCUuid *uuidCommand = nullptr;
OutputSegment *linkEditSegment = nullptr;
};
// LC_DYLD_INFO_ONLY stores the offsets of symbol import/export information.
class LCDyldInfo final : public LoadCommand {
public:
LCDyldInfo(RebaseSection *rebaseSection, BindingSection *bindingSection,
WeakBindingSection *weakBindingSection,
LazyBindingSection *lazyBindingSection,
ExportSection *exportSection)
: rebaseSection(rebaseSection), bindingSection(bindingSection),
weakBindingSection(weakBindingSection),
lazyBindingSection(lazyBindingSection), exportSection(exportSection) {}
uint32_t getSize() const override { return sizeof(dyld_info_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<dyld_info_command *>(buf);
c->cmd = LC_DYLD_INFO_ONLY;
c->cmdsize = getSize();
if (rebaseSection->isNeeded()) {
c->rebase_off = rebaseSection->fileOff;
c->rebase_size = rebaseSection->getFileSize();
}
if (bindingSection->isNeeded()) {
c->bind_off = bindingSection->fileOff;
c->bind_size = bindingSection->getFileSize();
}
if (weakBindingSection->isNeeded()) {
c->weak_bind_off = weakBindingSection->fileOff;
c->weak_bind_size = weakBindingSection->getFileSize();
}
if (lazyBindingSection->isNeeded()) {
c->lazy_bind_off = lazyBindingSection->fileOff;
c->lazy_bind_size = lazyBindingSection->getFileSize();
}
if (exportSection->isNeeded()) {
c->export_off = exportSection->fileOff;
c->export_size = exportSection->getFileSize();
}
}
RebaseSection *rebaseSection;
BindingSection *bindingSection;
WeakBindingSection *weakBindingSection;
LazyBindingSection *lazyBindingSection;
ExportSection *exportSection;
};
class LCSubFramework final : public LoadCommand {
public:
LCSubFramework(StringRef umbrella) : umbrella(umbrella) {}
uint32_t getSize() const override {
return alignToPowerOf2(sizeof(sub_framework_command) + umbrella.size() + 1,
target->wordSize);
}
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<sub_framework_command *>(buf);
buf += sizeof(sub_framework_command);
c->cmd = LC_SUB_FRAMEWORK;
c->cmdsize = getSize();
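// The umbrella name is stored inline, immediately after the fixed-size
// struct; the field holds its offset from the start of the load command.
// LCDylib, LCLoadDylinker, LCRPath, and LCDyldEnv below use the same layout.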
c->umbrella = sizeof(sub_framework_command);
memcpy(buf, umbrella.data(), umbrella.size());
buf[umbrella.size()] = '\0';
}
private:
const StringRef umbrella;
};
class LCFunctionStarts final : public LoadCommand {
public:
explicit LCFunctionStarts(FunctionStartsSection *functionStartsSection)
: functionStartsSection(functionStartsSection) {}
uint32_t getSize() const override { return sizeof(linkedit_data_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<linkedit_data_command *>(buf);
c->cmd = LC_FUNCTION_STARTS;
c->cmdsize = getSize();
c->dataoff = functionStartsSection->fileOff;
c->datasize = functionStartsSection->getFileSize();
}
private:
FunctionStartsSection *functionStartsSection;
};
class LCDataInCode final : public LoadCommand {
public:
explicit LCDataInCode(DataInCodeSection *dataInCodeSection)
: dataInCodeSection(dataInCodeSection) {}
uint32_t getSize() const override { return sizeof(linkedit_data_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<linkedit_data_command *>(buf);
c->cmd = LC_DATA_IN_CODE;
c->cmdsize = getSize();
c->dataoff = dataInCodeSection->fileOff;
c->datasize = dataInCodeSection->getFileSize();
}
private:
DataInCodeSection *dataInCodeSection;
};
class LCDysymtab final : public LoadCommand {
public:
LCDysymtab(SymtabSection *symtabSection,
IndirectSymtabSection *indirectSymtabSection)
: symtabSection(symtabSection),
indirectSymtabSection(indirectSymtabSection) {}
uint32_t getSize() const override { return sizeof(dysymtab_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<dysymtab_command *>(buf);
c->cmd = LC_DYSYMTAB;
c->cmdsize = getSize();
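// The symbol table is laid out with local symbols first, then externally
// defined ones, then undefined ones, so each group can be described to
// dyld as a contiguous range via a start index and a count.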
c->ilocalsym = 0;
c->iextdefsym = c->nlocalsym = symtabSection->getNumLocalSymbols();
c->nextdefsym = symtabSection->getNumExternalSymbols();
c->iundefsym = c->iextdefsym + c->nextdefsym;
c->nundefsym = symtabSection->getNumUndefinedSymbols();
c->indirectsymoff = indirectSymtabSection->fileOff;
c->nindirectsyms = indirectSymtabSection->getNumSymbols();
}
SymtabSection *symtabSection;
IndirectSymtabSection *indirectSymtabSection;
};
template <class LP> class LCSegment final : public LoadCommand {
public:
LCSegment(StringRef name, OutputSegment *seg) : name(name), seg(seg) {}
uint32_t getSize() const override {
return sizeof(typename LP::segment_command) +
seg->numNonHiddenSections() * sizeof(typename LP::section);
}
void writeTo(uint8_t *buf) const override {
using SegmentCommand = typename LP::segment_command;
using SectionHeader = typename LP::section;
auto *c = reinterpret_cast<SegmentCommand *>(buf);
buf += sizeof(SegmentCommand);
c->cmd = LP::segmentLCType;
c->cmdsize = getSize();
memcpy(c->segname, name.data(), name.size());
c->fileoff = seg->fileOff;
c->maxprot = seg->maxProt;
c->initprot = seg->initProt;
c->vmaddr = seg->addr;
c->vmsize = seg->vmSize;
c->filesize = seg->fileSize;
c->nsects = seg->numNonHiddenSections();
c->flags = seg->flags;
for (const OutputSection *osec : seg->getSections()) {
if (osec->isHidden())
continue;
auto *sectHdr = reinterpret_cast<SectionHeader *>(buf);
buf += sizeof(SectionHeader);
memcpy(sectHdr->sectname, osec->name.data(), osec->name.size());
memcpy(sectHdr->segname, name.data(), name.size());
sectHdr->addr = osec->addr;
sectHdr->offset = osec->fileOff;
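// Mach-O section headers store alignment as a power-of-two exponent.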
sectHdr->align = Log2_32(osec->align);
sectHdr->flags = osec->flags;
sectHdr->size = osec->getSize();
sectHdr->reserved1 = osec->reserved1;
sectHdr->reserved2 = osec->reserved2;
}
}
private:
StringRef name;
OutputSegment *seg;
};
class LCMain final : public LoadCommand {
uint32_t getSize() const override {
return sizeof(structs::entry_point_command);
}
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<structs::entry_point_command *>(buf);
c->cmd = LC_MAIN;
c->cmdsize = getSize();
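// LC_MAIN records the entry point as a file offset from the start of the
// Mach-O header rather than as a virtual address.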
if (config->entry->isInStubs())
c->entryoff =
in.stubs->fileOff + config->entry->stubsIndex * target->stubSize;
else
c->entryoff = config->entry->getVA() - in.header->addr;
c->stacksize = 0;
}
};
class LCSymtab final : public LoadCommand {
public:
LCSymtab(SymtabSection *symtabSection, StringTableSection *stringTableSection)
: symtabSection(symtabSection), stringTableSection(stringTableSection) {}
uint32_t getSize() const override { return sizeof(symtab_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<symtab_command *>(buf);
c->cmd = LC_SYMTAB;
c->cmdsize = getSize();
c->symoff = symtabSection->fileOff;
c->nsyms = symtabSection->getNumSymbols();
c->stroff = stringTableSection->fileOff;
c->strsize = stringTableSection->getFileSize();
}
SymtabSection *symtabSection = nullptr;
StringTableSection *stringTableSection = nullptr;
};
// There are several dylib load commands that share the same structure:
// * LC_LOAD_DYLIB
// * LC_ID_DYLIB
// * LC_REEXPORT_DYLIB
class LCDylib final : public LoadCommand {
public:
LCDylib(LoadCommandType type, StringRef path,
uint32_t compatibilityVersion = 0, uint32_t currentVersion = 0)
: type(type), path(path), compatibilityVersion(compatibilityVersion),
currentVersion(currentVersion) {
instanceCount++;
}
uint32_t getSize() const override {
return alignToPowerOf2(sizeof(dylib_command) + path.size() + 1,
target->wordSize);
}
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<dylib_command *>(buf);
buf += sizeof(dylib_command);
c->cmd = type;
c->cmdsize = getSize();
c->dylib.name = sizeof(dylib_command);
c->dylib.timestamp = 0;
c->dylib.compatibility_version = compatibilityVersion;
c->dylib.current_version = currentVersion;
memcpy(buf, path.data(), path.size());
buf[path.size()] = '\0';
}
static uint32_t getInstanceCount() { return instanceCount; }
static void resetInstanceCount() { instanceCount = 0; }
private:
LoadCommandType type;
StringRef path;
uint32_t compatibilityVersion;
uint32_t currentVersion;
static uint32_t instanceCount;
};
uint32_t LCDylib::instanceCount = 0;
class LCLoadDylinker final : public LoadCommand {
public:
uint32_t getSize() const override {
return alignToPowerOf2(sizeof(dylinker_command) + path.size() + 1,
target->wordSize);
}
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<dylinker_command *>(buf);
buf += sizeof(dylinker_command);
c->cmd = LC_LOAD_DYLINKER;
c->cmdsize = getSize();
c->name = sizeof(dylinker_command);
memcpy(buf, path.data(), path.size());
buf[path.size()] = '\0';
}
private:
// Recent versions of Darwin won't run any binary that has dyld at a
// different location.
const StringRef path = "/usr/lib/dyld";
};
class LCRPath final : public LoadCommand {
public:
explicit LCRPath(StringRef path) : path(path) {}
uint32_t getSize() const override {
return alignToPowerOf2(sizeof(rpath_command) + path.size() + 1,
target->wordSize);
}
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<rpath_command *>(buf);
buf += sizeof(rpath_command);
c->cmd = LC_RPATH;
c->cmdsize = getSize();
c->path = sizeof(rpath_command);
memcpy(buf, path.data(), path.size());
buf[path.size()] = '\0';
}
private:
StringRef path;
};
class LCDyldEnv final : public LoadCommand {
public:
explicit LCDyldEnv(StringRef name) : name(name) {}
uint32_t getSize() const override {
return alignToPowerOf2(sizeof(dyld_env_command) + name.size() + 1,
target->wordSize);
}
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<dyld_env_command *>(buf);
buf += sizeof(dyld_env_command);
c->cmd = LC_DYLD_ENVIRONMENT;
c->cmdsize = getSize();
c->name = sizeof(dyld_env_command);
memcpy(buf, name.data(), name.size());
buf[name.size()] = '\0';
}
private:
StringRef name;
};
class LCMinVersion final : public LoadCommand {
public:
explicit LCMinVersion(const PlatformInfo &platformInfo)
: platformInfo(platformInfo) {}
uint32_t getSize() const override { return sizeof(version_min_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<version_min_command *>(buf);
switch (platformInfo.target.Platform) {
case PLATFORM_MACOS:
c->cmd = LC_VERSION_MIN_MACOSX;
break;
case PLATFORM_IOS:
case PLATFORM_IOSSIMULATOR:
c->cmd = LC_VERSION_MIN_IPHONEOS;
break;
case PLATFORM_TVOS:
case PLATFORM_TVOSSIMULATOR:
c->cmd = LC_VERSION_MIN_TVOS;
break;
case PLATFORM_WATCHOS:
case PLATFORM_WATCHOSSIMULATOR:
c->cmd = LC_VERSION_MIN_WATCHOS;
break;
default:
llvm_unreachable("invalid platform");
break;
}
c->cmdsize = getSize();
c->version = encodeVersion(platformInfo.target.MinDeployment);
c->sdk = encodeVersion(platformInfo.sdk);
}
private:
const PlatformInfo &platformInfo;
};
class LCBuildVersion final : public LoadCommand {
public:
explicit LCBuildVersion(const PlatformInfo &platformInfo)
: platformInfo(platformInfo) {}
const int ntools = 1;
uint32_t getSize() const override {
return sizeof(build_version_command) + ntools * sizeof(build_tool_version);
}
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<build_version_command *>(buf);
c->cmd = LC_BUILD_VERSION;
c->cmdsize = getSize();
c->platform = static_cast<uint32_t>(platformInfo.target.Platform);
c->minos = encodeVersion(platformInfo.target.MinDeployment);
c->sdk = encodeVersion(platformInfo.sdk);
c->ntools = ntools;
auto *t = reinterpret_cast<build_tool_version *>(&c[1]);
t->tool = TOOL_LLD;
t->version = encodeVersion(VersionTuple(
LLVM_VERSION_MAJOR, LLVM_VERSION_MINOR, LLVM_VERSION_PATCH));
}
private:
const PlatformInfo &platformInfo;
};
// Stores a unique identifier for the output file based on an MD5 hash of its
// contents. In order to hash the contents, we must first write them, but
// LC_UUID itself must be part of the written contents in order for all the
// offsets to be calculated correctly. We resolve this circular paradox by
// first writing an LC_UUID with an all-zero UUID, then updating the UUID with
// its real value later.
class LCUuid final : public LoadCommand {
public:
uint32_t getSize() const override { return sizeof(uuid_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<uuid_command *>(buf);
c->cmd = LC_UUID;
c->cmdsize = getSize();
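// Stash the location of the UUID field so writeUuid() can patch in the
// real value once the rest of the output has been written and hashed.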
uuidBuf = c->uuid;
}
void writeUuid(uint64_t digest) const {
// xxhash only gives us 8 bytes, so put some fixed data in the other half.
static_assert(sizeof(uuid_command::uuid) == 16, "unexpected uuid size");
memcpy(uuidBuf, "LLD\xa1UU1D", 8);
memcpy(uuidBuf + 8, &digest, 8);
// RFC 4122 conformance. We need to fix 4 bits in byte 6 and 2 bits in
// byte 8. Byte 6 is already fine due to the fixed data we put in. We don't
// want to lose bits of the digest in byte 8, so swap that with a byte of
// fixed data that happens to have the right bits set.
std::swap(uuidBuf[3], uuidBuf[8]);
// Claim that this is an MD5-based hash. It isn't, but this signals that
// this is not a time-based and not a random hash. MD5 seems like the least
// bad lie we can put here.
assert((uuidBuf[6] & 0xf0) == 0x30 && "See RFC 4122 Sections 4.2.2, 4.1.3");
assert((uuidBuf[8] & 0xc0) == 0x80 && "See RFC 4122 Section 4.2.2");
}
mutable uint8_t *uuidBuf;
};
template <class LP> class LCEncryptionInfo final : public LoadCommand {
public:
uint32_t getSize() const override {
return sizeof(typename LP::encryption_info_command);
}
void writeTo(uint8_t *buf) const override {
using EncryptionInfo = typename LP::encryption_info_command;
auto *c = reinterpret_cast<EncryptionInfo *>(buf);
buf += sizeof(EncryptionInfo);
c->cmd = LP::encryptionInfoLCType;
c->cmdsize = getSize();
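// The encryptable region starts right past the mach header and load
// commands and covers the remainder of the __TEXT segment.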
c->cryptoff = in.header->getSize();
auto it = find_if(outputSegments, [](const OutputSegment *seg) {
return seg->name == segment_names::text;
});
assert(it != outputSegments.end());
c->cryptsize = (*it)->fileSize - c->cryptoff;
}
};
class LCCodeSignature final : public LoadCommand {
public:
LCCodeSignature(CodeSignatureSection *section) : section(section) {}
uint32_t getSize() const override { return sizeof(linkedit_data_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<linkedit_data_command *>(buf);
c->cmd = LC_CODE_SIGNATURE;
c->cmdsize = getSize();
c->dataoff = static_cast<uint32_t>(section->fileOff);
c->datasize = section->getSize();
}
CodeSignatureSection *section;
};
class LCExportsTrie final : public LoadCommand {
public:
LCExportsTrie(ExportSection *section) : section(section) {}
uint32_t getSize() const override { return sizeof(linkedit_data_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<linkedit_data_command *>(buf);
c->cmd = LC_DYLD_EXPORTS_TRIE;
c->cmdsize = getSize();
c->dataoff = section->fileOff;
c->datasize = section->getSize();
}
ExportSection *section;
};
class LCChainedFixups final : public LoadCommand {
public:
LCChainedFixups(ChainedFixupsSection *section) : section(section) {}
uint32_t getSize() const override { return sizeof(linkedit_data_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<linkedit_data_command *>(buf);
c->cmd = LC_DYLD_CHAINED_FIXUPS;
c->cmdsize = getSize();
c->dataoff = section->fileOff;
c->datasize = section->getSize();
}
ChainedFixupsSection *section;
};
} // namespace
void Writer::treatSpecialUndefineds() {
if (config->entry)
if (auto *undefined = dyn_cast<Undefined>(config->entry))
treatUndefinedSymbol(*undefined, "the entry point");
// FIXME: This prints symbols that are undefined both in input files and
// via -u flag twice.
for (const Symbol *sym : config->explicitUndefineds) {
if (const auto *undefined = dyn_cast<Undefined>(sym))
treatUndefinedSymbol(*undefined, "-u");
}
// Literal exported-symbol names must be defined, but glob
// patterns need not match.
for (const CachedHashStringRef &cachedName :
config->exportedSymbols.literals) {
if (const Symbol *sym = symtab->find(cachedName))
if (const auto *undefined = dyn_cast<Undefined>(sym))
treatUndefinedSymbol(*undefined, "-exported_symbol(s_list)");
}
}
static void prepareSymbolRelocation(Symbol *sym, const InputSection *isec,
const lld::macho::Reloc &r) {
assert(sym->isLive());
const RelocAttrs &relocAttrs = target->getRelocAttrs(r.type);
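// Depending on the relocation kind, the referenced symbol may need a stub
// (for branches to dynamically bound symbols), a GOT slot, or a TLV
// pointer; create the corresponding synthetic entries here.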
if (relocAttrs.hasAttr(RelocAttrBits::BRANCH)) {
if (needsBinding(sym))
in.stubs->addEntry(sym);
} else if (relocAttrs.hasAttr(RelocAttrBits::GOT)) {
if (relocAttrs.hasAttr(RelocAttrBits::POINTER) || needsBinding(sym))
in.got->addEntry(sym);
} else if (relocAttrs.hasAttr(RelocAttrBits::TLV)) {
if (needsBinding(sym))
in.tlvPointers->addEntry(sym);
} else if (relocAttrs.hasAttr(RelocAttrBits::UNSIGNED)) {
// References from thread-local variable sections are treated as offsets
// relative to the start of the referent section, and therefore have no
// need of rebase opcodes.
if (!(isThreadLocalVariables(isec->getFlags()) && isa<Defined>(sym)))
addNonLazyBindingEntries(sym, isec, r.offset, r.addend);
}
}
void Writer::scanRelocations() {
TimeTraceScope timeScope("Scan relocations");
// This can't use a for-each loop: It calls treatUndefinedSymbol(), which can
// add to inputSections, which invalidates inputSections's iterators.
for (size_t i = 0; i < inputSections.size(); ++i) {
ConcatInputSection *isec = inputSections[i];
if (isec->shouldOmitFromOutput())
continue;
for (auto it = isec->relocs.begin(); it != isec->relocs.end(); ++it) {
lld::macho::Reloc &r = *it;
// Canonicalize the referent so that later accesses in Writer won't
// have to worry about it.
if (auto *referentIsec = r.referent.dyn_cast<InputSection *>())
r.referent = referentIsec->canonical();
if (target->hasAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
// Skip over the following UNSIGNED relocation -- it's just there as the
// minuend, and doesn't have the usual UNSIGNED semantics. We don't want
// to emit rebase opcodes for it.
++it;
// Canonicalize the referent so that later accesses in Writer won't
// have to worry about it.
if (auto *referentIsec = it->referent.dyn_cast<InputSection *>())
it->referent = referentIsec->canonical();
continue;
}
if (auto *sym = r.referent.dyn_cast<Symbol *>()) {
if (auto *undefined = dyn_cast<Undefined>(sym))
treatUndefinedSymbol(*undefined, isec, r.offset);
// treatUndefinedSymbol() can replace sym with a DylibSymbol; re-check.
if (!isa<Undefined>(sym) && validateSymbolRelocation(sym, isec, r))
prepareSymbolRelocation(sym, isec, r);
} else {
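// The referent is an InputSection. Absolute (non-pc-relative) addresses
// in section data must be slid by dyld at load time, so record a rebase
// (or, with chained fixups, a chained rebase target).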
if (!r.pcrel) {
if (config->emitChainedFixups)
in.chainedFixups->addRebase(isec, r.offset);
else
in.rebase->addEntry(isec, r.offset);
}
}
}
}
in.unwindInfo->prepare();
}
static void addNonWeakDefinition(const Defined *defined) {
if (config->emitChainedFixups)
in.chainedFixups->setHasNonWeakDefinition();
else
in.weakBinding->addNonWeakDefinition(defined);
}
void Writer::scanSymbols() {
TimeTraceScope timeScope("Scan symbols");
for (Symbol *sym : symtab->getSymbols()) {
if (auto *defined = dyn_cast<Defined>(sym)) {
if (!defined->isLive())
continue;
defined->canonicalize();
if (defined->overridesWeakDef)
addNonWeakDefinition(defined);
if (!defined->isAbsolute() && isCodeSection(defined->isec))
in.unwindInfo->addSymbol(defined);
} else if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
// This branch intentionally doesn't check isLive().
if (dysym->isDynamicLookup())
continue;
dysym->getFile()->refState =
std::max(dysym->getFile()->refState, dysym->getRefState());
} else if (isa<Undefined>(sym)) {
if (sym->getName().starts_with(ObjCStubsSection::symbolPrefix))
in.objcStubs->addEntry(sym);
}
}
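// symtab only tracks external symbols, so walk each object file's symbol
// list as well to pick up live non-external Defined symbols.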
for (const InputFile *file : inputFiles) {
if (auto *objFile = dyn_cast<ObjFile>(file))
for (Symbol *sym : objFile->symbols) {
if (auto *defined = dyn_cast_or_null<Defined>(sym)) {
if (!defined->isLive())
continue;
defined->canonicalize();
if (!defined->isExternal() && !defined->isAbsolute() &&
isCodeSection(defined->isec))
in.unwindInfo->addSymbol(defined);
}
}
}
}
// TODO: ld64 enforces the old load commands in a few other cases.
static bool useLCBuildVersion(const PlatformInfo &platformInfo) {
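// OS releases from these versions on are expected to accept
// LC_BUILD_VERSION; for older deployment targets we emit the legacy
// LC_VERSION_MIN_* commands instead.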
static const std::array<std::pair<PlatformType, VersionTuple>, 7> minVersion =
{{{PLATFORM_MACOS, VersionTuple(10, 14)},
{PLATFORM_IOS, VersionTuple(12, 0)},
{PLATFORM_IOSSIMULATOR, VersionTuple(13, 0)},
{PLATFORM_TVOS, VersionTuple(12, 0)},
{PLATFORM_TVOSSIMULATOR, VersionTuple(13, 0)},
{PLATFORM_WATCHOS, VersionTuple(5, 0)},
{PLATFORM_WATCHOSSIMULATOR, VersionTuple(6, 0)}}};
auto it = llvm::find_if(minVersion, [&](const auto &p) {
return p.first == platformInfo.target.Platform;
});
return it == minVersion.end()
? true
: platformInfo.target.MinDeployment >= it->second;
}
template <class LP> void Writer::createLoadCommands() {
uint8_t segIndex = 0;
for (OutputSegment *seg : outputSegments) {
in.header->addLoadCommand(make<LCSegment<LP>>(seg->name, seg));
seg->index = segIndex++;
}
if (config->emitChainedFixups) {
in.header->addLoadCommand(make<LCChainedFixups>(in.chainedFixups));
in.header->addLoadCommand(make<LCExportsTrie>(in.exports));
} else {
in.header->addLoadCommand(make<LCDyldInfo>(
in.rebase, in.binding, in.weakBinding, in.lazyBinding, in.exports));
}
in.header->addLoadCommand(make<LCSymtab>(symtabSection, stringTableSection));
in.header->addLoadCommand(
make<LCDysymtab>(symtabSection, indirectSymtabSection));
if (!config->umbrella.empty())
in.header->addLoadCommand(make<LCSubFramework>(config->umbrella));
if (config->emitEncryptionInfo)
in.header->addLoadCommand(make<LCEncryptionInfo<LP>>());
for (StringRef path : config->runtimePaths)
in.header->addLoadCommand(make<LCRPath>(path));
switch (config->outputType) {
case MH_EXECUTE:
in.header->addLoadCommand(make<LCLoadDylinker>());
break;
case MH_DYLIB:
in.header->addLoadCommand(make<LCDylib>(LC_ID_DYLIB, config->installName,
config->dylibCompatibilityVersion,
config->dylibCurrentVersion));
break;
case MH_BUNDLE:
break;
default:
llvm_unreachable("unhandled output file type");
}
if (config->generateUuid) {
uuidCommand = make<LCUuid>();
in.header->addLoadCommand(uuidCommand);
}
if (useLCBuildVersion(config->platformInfo))
in.header->addLoadCommand(make<LCBuildVersion>(config->platformInfo));
else
in.header->addLoadCommand(make<LCMinVersion>(config->platformInfo));
if (config->secondaryPlatformInfo) {
in.header->addLoadCommand(
make<LCBuildVersion>(*config->secondaryPlatformInfo));
}
// This is down here to match ld64's load command order.
if (config->outputType == MH_EXECUTE)
in.header->addLoadCommand(make<LCMain>());
  // See ld64's OutputFile::buildDylibOrdinalMapping for the corresponding
  // library ordinal computation code.
int64_t dylibOrdinal = 1;
DenseMap<StringRef, int64_t> ordinalForInstallName;
std::vector<DylibFile *> dylibFiles;
for (InputFile *file : inputFiles) {
if (auto *dylibFile = dyn_cast<DylibFile>(file))
dylibFiles.push_back(dylibFile);
}
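  // Implicitly-loaded dylibs are appended as we iterate; since size() is
  // re-evaluated on each iteration, the extra dylibs of appended dylibs get
  // visited too.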
for (size_t i = 0; i < dylibFiles.size(); ++i)
dylibFiles.insert(dylibFiles.end(), dylibFiles[i]->extraDylibs.begin(),
dylibFiles[i]->extraDylibs.end());
for (DylibFile *dylibFile : dylibFiles) {
if (dylibFile->isBundleLoader) {
dylibFile->ordinal = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
// Shortcut since bundle-loader does not re-export the symbols.
dylibFile->reexport = false;
continue;
}
// Don't emit load commands for a dylib that is not referenced if:
    // - it was added implicitly (via a reexport, an LC_LOAD_DYLIB --
// if it's on the linker command line, it's explicit)
// - or it's marked MH_DEAD_STRIPPABLE_DYLIB
// - or the flag -dead_strip_dylibs is used
// FIXME: `isReferenced()` is currently computed before dead code
// stripping, so references from dead code keep a dylib alive. This
// matches ld64, but it's something we should do better.
if (!dylibFile->isReferenced() && !dylibFile->forceNeeded &&
(!dylibFile->isExplicitlyLinked() || dylibFile->deadStrippable ||
config->deadStripDylibs))
continue;
// Several DylibFiles can have the same installName. Only emit a single
// load command for that installName and give all these DylibFiles the
// same ordinal.
// This can happen in several cases:
// - a new framework could change its installName to an older
// framework name via an $ld$ symbol depending on platform_version
// - symlinks (for example, libpthread.tbd is a symlink to libSystem.tbd;
// Foo.framework/Foo.tbd is usually a symlink to
// Foo.framework/Versions/Current/Foo.tbd, where
// Foo.framework/Versions/Current is usually a symlink to
// Foo.framework/Versions/A)
// - a framework can be linked both explicitly on the linker
// command line and implicitly as a reexport from a different
// framework. The re-export will usually point to the tbd file
// in Foo.framework/Versions/A/Foo.tbd, while the explicit link will
// usually find Foo.framework/Foo.tbd. These are usually symlinks,
// but in a --reproduce archive they will be identical but distinct
// files.
// In the first case, *semantically distinct* DylibFiles will have the
// same installName.
int64_t &ordinal = ordinalForInstallName[dylibFile->installName];
if (ordinal) {
dylibFile->ordinal = ordinal;
continue;
}
ordinal = dylibFile->ordinal = dylibOrdinal++;
LoadCommandType lcType =
dylibFile->forceWeakImport || dylibFile->refState == RefState::Weak
? LC_LOAD_WEAK_DYLIB
: LC_LOAD_DYLIB;
in.header->addLoadCommand(make<LCDylib>(lcType, dylibFile->installName,
dylibFile->compatibilityVersion,
dylibFile->currentVersion));
if (dylibFile->reexport)
in.header->addLoadCommand(
make<LCDylib>(LC_REEXPORT_DYLIB, dylibFile->installName));
}
for (const auto &dyldEnv : config->dyldEnvs)
in.header->addLoadCommand(make<LCDyldEnv>(dyldEnv));
if (functionStartsSection)
in.header->addLoadCommand(make<LCFunctionStarts>(functionStartsSection));
if (dataInCodeSection)
in.header->addLoadCommand(make<LCDataInCode>(dataInCodeSection));
if (codeSignatureSection)
in.header->addLoadCommand(make<LCCodeSignature>(codeSignatureSection));
const uint32_t MACOS_MAXPATHLEN = 1024;
config->headerPad = std::max(
config->headerPad, (config->headerPadMaxInstallNames
? LCDylib::getInstanceCount() * MACOS_MAXPATHLEN
: 0));
}
// Sorting can only happen once all outputs have been collected. Here we sort
// segments, output sections within each segment, and input sections within
// each output section.
static void sortSegmentsAndSections() {
TimeTraceScope timeScope("Sort segments and sections");
sortOutputSegments();
DenseMap<const InputSection *, size_t> isecPriorities =
priorityBuilder.buildInputSectionPriorities();
uint32_t sectionIndex = 0;
for (OutputSegment *seg : outputSegments) {
seg->sortOutputSections();
// References from thread-local variable sections are treated as offsets
// relative to the start of the thread-local data memory area, which
// is initialized via copying all the TLV data sections (which are all
// contiguous). If later data sections require a greater alignment than
// earlier ones, the offsets of data within those sections won't be
    // guaranteed to be aligned unless we normalize alignments. We therefore use
// the largest alignment for all TLV data sections.
uint32_t tlvAlign = 0;
for (const OutputSection *osec : seg->getSections())
if (isThreadLocalData(osec->flags) && osec->align > tlvAlign)
tlvAlign = osec->align;
for (OutputSection *osec : seg->getSections()) {
// Now that the output sections are sorted, assign the final
// output section indices.
if (!osec->isHidden())
osec->index = ++sectionIndex;
if (isThreadLocalData(osec->flags)) {
if (!firstTLVDataSection)
firstTLVDataSection = osec;
osec->align = tlvAlign;
}
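      // Within each ConcatOutputSection, place higher-priority input
      // sections first. Priorities typically come from -order_file or
      // call-graph profile sorting.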
if (!isecPriorities.empty()) {
if (auto *merged = dyn_cast<ConcatOutputSection>(osec)) {
llvm::stable_sort(
merged->inputs, [&](InputSection *a, InputSection *b) {
return isecPriorities.lookup(a) > isecPriorities.lookup(b);
});
}
}
}
}
}
template <class LP> void Writer::createOutputSections() {
TimeTraceScope timeScope("Create output sections");
// First, create hidden sections
stringTableSection = make<StringTableSection>();
symtabSection = makeSymtabSection<LP>(*stringTableSection);
indirectSymtabSection = make<IndirectSymtabSection>();
if (config->adhocCodesign)
codeSignatureSection = make<CodeSignatureSection>();
if (config->emitDataInCodeInfo)
dataInCodeSection = make<DataInCodeSection>();
if (config->emitFunctionStarts)
functionStartsSection = make<FunctionStartsSection>();
switch (config->outputType) {
case MH_EXECUTE:
make<PageZeroSection>();
break;
case MH_DYLIB:
case MH_BUNDLE:
break;
default:
llvm_unreachable("unhandled output file type");
}
// Then add input sections to output sections.
for (ConcatInputSection *isec : inputSections) {
if (isec->shouldOmitFromOutput())
continue;
ConcatOutputSection *osec = cast<ConcatOutputSection>(isec->parent);
osec->addInput(isec);
osec->inputOrder =
std::min(osec->inputOrder, static_cast<int>(isec->outSecOff));
}
// Once all the inputs are added, we can finalize the output section
// properties and create the corresponding output segments.
for (const auto &it : concatOutputSections) {
StringRef segname = it.first.first;
ConcatOutputSection *osec = it.second;
assert(segname != segment_names::ld);
if (osec->isNeeded()) {
// See comment in ObjFile::splitEhFrames()
if (osec->name == section_names::ehFrame &&
segname == segment_names::text)
osec->align = target->wordSize;
// MC keeps the default 1-byte alignment for __thread_vars, even though it
// contains pointers that are fixed up by dyld, which requires proper
// alignment.
if (isThreadLocalVariables(osec->flags))
osec->align = std::max<uint32_t>(osec->align, target->wordSize);
getOrCreateOutputSegment(segname)->addOutputSection(osec);
}
}
for (SyntheticSection *ssec : syntheticSections) {
auto it = concatOutputSections.find({ssec->segname, ssec->name});
// We add all LinkEdit sections here because we don't know if they are
// needed until their finalizeContents() methods get called later. While
// this means that we add some redundant sections to __LINKEDIT, there is
    // no redundancy in the output, as we do not emit section headers for
// any LinkEdit sections.
if (ssec->isNeeded() || ssec->segname == segment_names::linkEdit) {
if (it == concatOutputSections.end()) {
getOrCreateOutputSegment(ssec->segname)->addOutputSection(ssec);
} else {
fatal("section from " +
toString(it->second->firstSection()->getFile()) +
" conflicts with synthetic section " + ssec->segname + "," +
ssec->name);
}
}
}
  // dyld requires the __LINKEDIT segment to always exist (even if empty).
linkEditSegment = getOrCreateOutputSegment(segment_names::linkEdit);
}
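// Finalize the contents of all non-__LINKEDIT sections, then assign virtual
// addresses and file offsets to every segment in their sorted order.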
void Writer::finalizeAddresses() {
TimeTraceScope timeScope("Finalize addresses");
uint64_t pageSize = target->getPageSize();
// We could parallelize this loop, but local benchmarking indicates it is
// faster to do it all in the main thread.
for (OutputSegment *seg : outputSegments) {
if (seg == linkEditSegment)
continue;
for (OutputSection *osec : seg->getSections()) {
if (!osec->isNeeded())
continue;
// Other kinds of OutputSections have already been finalized.
if (auto *concatOsec = dyn_cast<ConcatOutputSection>(osec))
concatOsec->finalizeContents();
}
}
// Ensure that segments (and the sections they contain) are allocated
// addresses in ascending order, which dyld requires.
//
// Note that at this point, __LINKEDIT sections are empty, but we need to
// determine addresses of other segments/sections before generating its
// contents.
for (OutputSegment *seg : outputSegments) {
if (seg == linkEditSegment)
continue;
seg->addr = addr;
assignAddresses(seg);
// codesign / libstuff checks for segment ordering by verifying that
// `fileOff + fileSize == next segment fileOff`. So we call
// alignToPowerOf2() before (instead of after) computing fileSize to ensure
// that the segments are contiguous. We handle addr / vmSize similarly for
// the same reason.
fileOff = alignToPowerOf2(fileOff, pageSize);
addr = alignToPowerOf2(addr, pageSize);
seg->vmSize = addr - seg->addr;
seg->fileSize = fileOff - seg->fileOff;
seg->assignAddressesToStartEndSymbols();
}
}
void Writer::finalizeLinkEditSegment() {
TimeTraceScope timeScope("Finalize __LINKEDIT segment");
// Fill __LINKEDIT contents.
std::array<LinkEditSection *, 10> linkEditSections{
in.rebase, in.binding,
in.weakBinding, in.lazyBinding,
in.exports, in.chainedFixups,
symtabSection, indirectSymtabSection,
dataInCodeSection, functionStartsSection,
};
SmallVector<std::shared_future<void>> threadFutures;
threadFutures.reserve(linkEditSections.size());
for (LinkEditSection *osec : linkEditSections)
if (osec)
threadFutures.emplace_back(threadPool.async(
[](LinkEditSection *osec) { osec->finalizeContents(); }, osec));
for (std::shared_future<void> &future : threadFutures)
future.wait();
// Now that __LINKEDIT is filled out, do a proper calculation of its
// addresses and offsets.
linkEditSegment->addr = addr;
assignAddresses(linkEditSegment);
// No need to page-align fileOff / addr here since this is the last segment.
linkEditSegment->vmSize = addr - linkEditSegment->addr;
linkEditSegment->fileSize = fileOff - linkEditSegment->fileOff;
}
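// Lay out each needed section of `seg` at the current address / file offset,
// respecting section alignment. Zero-fill sections take up address space but
// no space in the file.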
void Writer::assignAddresses(OutputSegment *seg) {
seg->fileOff = fileOff;
for (OutputSection *osec : seg->getSections()) {
if (!osec->isNeeded())
continue;
addr = alignToPowerOf2(addr, osec->align);
fileOff = alignToPowerOf2(fileOff, osec->align);
osec->addr = addr;
osec->fileOff = isZeroFill(osec->flags) ? 0 : fileOff;
osec->finalize();
osec->assignAddressesToStartEndSymbols();
addr += osec->getSize();
fileOff += osec->getFileSize();
}
}
void Writer::openFile() {
Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
FileOutputBuffer::create(config->outputFile, fileOff,
FileOutputBuffer::F_executable);
if (!bufferOrErr)
fatal("failed to open " + config->outputFile + ": " +
llvm::toString(bufferOrErr.takeError()));
buffer = std::move(*bufferOrErr);
in.bufferStart = buffer->getBufferStart();
}
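// Write each output section into the in-memory output buffer, in parallel;
// all file offsets were fixed during address assignment.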
void Writer::writeSections() {
uint8_t *buf = buffer->getBufferStart();
std::vector<const OutputSection *> osecs;
for (const OutputSegment *seg : outputSegments)
append_range(osecs, seg->getSections());
parallelForEach(osecs.begin(), osecs.end(), [&](const OutputSection *osec) {
osec->writeTo(buf + osec->fileOff);
});
}
void Writer::applyOptimizationHints() {
if (config->arch() != AK_arm64 || config->ignoreOptimizationHints)
return;
uint8_t *buf = buffer->getBufferStart();
TimeTraceScope timeScope("Apply linker optimization hints");
parallelForEach(inputFiles, [buf](const InputFile *file) {
if (const auto *objFile = dyn_cast<ObjFile>(file))
target->applyOptimizationHints(buf, *objFile);
});
}
// In order to utilize multiple cores, we first split the buffer into chunks,
// compute a hash for each chunk, and then compute a hash value of the hash
// values.
void Writer::writeUuid() {
TimeTraceScope timeScope("Computing UUID");
ArrayRef<uint8_t> data{buffer->getBufferStart(), buffer->getBufferEnd()};
std::vector<ArrayRef<uint8_t>> chunks = split(data, 1024 * 1024);
  // Leave one slot for the filename.
std::vector<uint64_t> hashes(chunks.size() + 1);
SmallVector<std::shared_future<void>> threadFutures;
threadFutures.reserve(chunks.size());
for (size_t i = 0; i < chunks.size(); ++i)
threadFutures.emplace_back(threadPool.async(
[&](size_t j) { hashes[j] = xxh3_64bits(chunks[j]); }, i));
for (std::shared_future<void> &future : threadFutures)
future.wait();
// Append the output filename so that identical binaries with different names
// don't get the same UUID.
hashes[chunks.size()] = xxh3_64bits(sys::path::filename(config->finalOutput));
uint64_t digest = xxh3_64bits({reinterpret_cast<uint8_t *>(hashes.data()),
hashes.size() * sizeof(uint64_t)});
uuidCommand->writeUuid(digest);
}
// This is step 5 of the algorithm described in the class comment of
// ChainedFixupsSection.
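// Every fixup's `next` field stores the distance to the following fixup on
// the same page, in 4-byte strides (the stride for DYLD_CHAINED_PTR_64);
// dyld follows these links at load time instead of parsing an opcode stream.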
void Writer::buildFixupChains() {
if (!config->emitChainedFixups)
return;
const std::vector<Location> &loc = in.chainedFixups->getLocations();
if (loc.empty())
return;
TimeTraceScope timeScope("Build fixup chains");
const uint64_t pageSize = target->getPageSize();
constexpr uint32_t stride = 4; // for DYLD_CHAINED_PTR_64
for (size_t i = 0, count = loc.size(); i < count;) {
const OutputSegment *oseg = loc[i].isec->parent->parent;
uint8_t *buf = buffer->getBufferStart() + oseg->fileOff;
uint64_t pageIdx = loc[i].offset / pageSize;
++i;
while (i < count && loc[i].isec->parent->parent == oseg &&
(loc[i].offset / pageSize) == pageIdx) {
uint64_t offset = loc[i].offset - loc[i - 1].offset;
auto fail = [&](Twine message) {
error(loc[i].isec->getSegName() + "," + loc[i].isec->getName() +
", offset " +
Twine(loc[i].offset - loc[i].isec->parent->getSegmentOffset()) +
": " + message);
};
if (offset < target->wordSize)
return fail("fixups overlap");
if (offset % stride != 0)
return fail(
"fixups are unaligned (offset " + Twine(offset) +
" is not a multiple of the stride). Re-link with -no_fixup_chains");
// The "next" field is in the same location for bind and rebase entries.
reinterpret_cast<dyld_chained_ptr_64_bind *>(buf + loc[i - 1].offset)
->next = offset / stride;
++i;
}
}
}
void Writer::writeCodeSignature() {
if (codeSignatureSection) {
TimeTraceScope timeScope("Write code signature");
codeSignatureSection->writeHashes(buffer->getBufferStart());
}
}
void Writer::writeOutputFile() {
TimeTraceScope timeScope("Write output file");
openFile();
reportPendingUndefinedSymbols();
if (errorCount())
return;
writeSections();
applyOptimizationHints();
buildFixupChains();
if (config->generateUuid)
writeUuid();
writeCodeSignature();
if (auto e = buffer->commit())
fatal("failed to write output '" + buffer->getPath() +
"': " + toString(std::move(e)));
}
template <class LP> void Writer::run() {
treatSpecialUndefineds();
if (config->entry && needsBinding(config->entry))
in.stubs->addEntry(config->entry);
// Canonicalization of all pointers to InputSections should be handled by
// these two scan* methods. I.e. from this point onward, for all live
// InputSections, we should have `isec->canonical() == isec`.
scanSymbols();
if (in.objcStubs->isNeeded())
in.objcStubs->setUp();
scanRelocations();
if (in.initOffsets->isNeeded())
in.initOffsets->setUp();
// Do not proceed if there were undefined or duplicate symbols.
reportPendingUndefinedSymbols();
reportPendingDuplicateSymbols();
if (errorCount())
return;
if (in.stubHelper && in.stubHelper->isNeeded())
in.stubHelper->setUp();
if (in.objCImageInfo->isNeeded())
in.objCImageInfo->finalizeContents();
// At this point, we should know exactly which output sections are needed,
// courtesy of scanSymbols() and scanRelocations().
createOutputSections<LP>();
// After this point, we create no new segments; HOWEVER, we might
// yet create branch-range extension thunks for architectures whose
// hardware call instructions have limited range, e.g., ARM(64).
// The thunks are created as InputSections interspersed among
  // the ordinary __TEXT,__text InputSections.
sortSegmentsAndSections();
createLoadCommands<LP>();
finalizeAddresses();
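  // The map file only needs the addresses finalized above, so it can be
  // written on a worker thread while the main thread finalizes __LINKEDIT.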
threadPool.async([&] {
if (LLVM_ENABLE_THREADS && config->timeTraceEnabled)
timeTraceProfilerInitialize(config->timeTraceGranularity, "writeMapFile");
writeMapFile();
if (LLVM_ENABLE_THREADS && config->timeTraceEnabled)
timeTraceProfilerFinishThread();
});
finalizeLinkEditSegment();
writeOutputFile();
}
template <class LP> void macho::writeResult() { Writer().run<LP>(); }
void macho::resetWriter() { LCDylib::resetInstanceCount(); }
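// Instantiate the synthetic sections. Which of them make it into the output
// depends on the configuration; e.g. with chained fixups we create neither
// the classic rebase/bind sections nor the lazy-binding stub helper.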
void macho::createSyntheticSections() {
in.header = make<MachHeaderSection>();
if (config->dedupStrings)
in.cStringSection =
make<DeduplicatedCStringSection>(section_names::cString);
else
in.cStringSection = make<CStringSection>(section_names::cString);
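// Selector names are always deduplicated: the names synthesized for the
// _objc_msgSend$<selector> stubs must be merged with those that already
// exist in the input object files.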
in.objcMethnameSection =
make<DeduplicatedCStringSection>(section_names::objcMethname);
in.wordLiteralSection = make<WordLiteralSection>();
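// With chained fixups, rebase and binding information is encoded in a
// single __LINKEDIT payload that dyld fixes up in place; otherwise we emit
// classic dyld opcodes together with the lazy-binding machinery.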
if (config->emitChainedFixups) {
in.chainedFixups = make<ChainedFixupsSection>();
} else {
in.rebase = make<RebaseSection>();
in.binding = make<BindingSection>();
in.weakBinding = make<WeakBindingSection>();
in.lazyBinding = make<LazyBindingSection>();
in.lazyPointers = make<LazyPointerSection>();
in.stubHelper = make<StubHelperSection>();
}
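// The sections below are created unconditionally; whether they end up in
// the output is decided later via their isNeeded() methods.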
in.exports = make<ExportSection>();
in.got = make<GotSection>();
in.tlvPointers = make<TlvPointerSection>();
in.stubs = make<StubsSection>();
in.objcStubs = make<ObjCStubsSection>();
in.unwindInfo = makeUnwindInfoSection();
in.objCImageInfo = make<ObjCImageInfoSection>();
in.initOffsets = make<InitOffsetsSection>();
// This section contains space for just a single word, and will be used by
// dyld to cache a pointer to the image loader it uses.
uint8_t *arr = bAlloc().Allocate<uint8_t>(target->wordSize);
memset(arr, 0, target->wordSize);
in.imageLoaderCache = makeSyntheticInputSection(
segment_names::data, section_names::data, S_REGULAR,
ArrayRef<uint8_t>{arr, target->wordSize},
/*align=*/target->wordSize);
// References from dyld are not visible to us, so ensure this section is
// always treated as live.
in.imageLoaderCache->live = true;
}
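// The addresses of thread-local variables are encoded as offsets relative
// to the start of the first TLV data section, which the writer records
// here.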
OutputSection *macho::firstTLVDataSection = nullptr;
template void macho::writeResult<LP64>();
template void macho::writeResult<ILP32>();