Bug 1855557 - Remove clang 16 toolchains. r=firefox-build-system-reviewers,sergesanspaille

Differential Revision: https://phabricator.services.mozilla.com/D189427
Mike Hommey 2023-10-02 20:01:07 +00:00
parent aeffb692d3
commit f955aa7555
20 changed files with 0 additions and 3755 deletions


@@ -1,75 +0,0 @@
From: Mike Hommey <mh@glandium.org>
Date: Thu, 23 Mar 2023 06:52:28 +0900
Subject: [PATCH] Apply the same fallbacks as runtimes search for stdlib search
When building clang with e.g. LLVM_ENABLE_RUNTIMES=libcxx;libunwind,
those runtimes end up in the stdlib search directory, and when
LLVM_ENABLE_PER_TARGET_RUNTIME_DIR is set, that ends up in a
target-specific subdirectory. The stdlib search does handle the
situation, but when the target in question is Android, the same issues
as those that required fallbacks for runtimes search apply.
Traditionally, those libraries are shipped as part of the Android NDK,
but when one builds their own clang for Android, they may want to use
the runtimes from the same version rather than the ones from the NDK.
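A minimal sketch of the triple fallback this relies on, using LLVM's Triple API (the header location varies across versions; llvm/TargetParser/Triple.h is assumed here):

#include "llvm/TargetParser/Triple.h"

// Android triples may carry an API level in the environment component.
llvm::Triple T("aarch64-unknown-linux-android21");
// T.isAndroid() == true, T.getEnvironmentName() == "android21"
llvm::Triple Fallback = T;
Fallback.setEnvironmentName("android");
// The search now tries lib/aarch64-unknown-linux-android21 first,
// then lib/aarch64-unknown-linux-android.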
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 7a4319ea680f..afbc7de8befd 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -535,34 +535,39 @@ const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
return Args.MakeArgString(getCompilerRT(Args, Component, Type));
}
-ToolChain::path_list ToolChain::getRuntimePaths() const {
- path_list Paths;
- auto addPathForTriple = [this, &Paths](const llvm::Triple &Triple) {
- SmallString<128> P(D.ResourceDir);
- llvm::sys::path::append(P, "lib", Triple.str());
- Paths.push_back(std::string(P.str()));
- };
-
- addPathForTriple(getTriple());
+template <typename F>
+static void fillPaths(const ToolChain &TC, F addPathForTriple) {
+ addPathForTriple(TC.getTriple());
// Android targets may include an API level at the end. We still want to fall
// back on a path without the API level.
- if (getTriple().isAndroid() &&
- getTriple().getEnvironmentName() != "android") {
- llvm::Triple TripleWithoutLevel = getTriple();
+ if (TC.getTriple().isAndroid() &&
+ TC.getTriple().getEnvironmentName() != "android") {
+ llvm::Triple TripleWithoutLevel = TC.getTriple();
TripleWithoutLevel.setEnvironmentName("android");
addPathForTriple(TripleWithoutLevel);
}
+}
+ToolChain::path_list ToolChain::getRuntimePaths() const {
+ path_list Paths;
+ auto addPathForTriple = [this, &Paths](const llvm::Triple &Triple) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", Triple.str());
+ Paths.push_back(std::string(P.str()));
+ };
+ fillPaths(*this, addPathForTriple);
return Paths;
}
ToolChain::path_list ToolChain::getStdlibPaths() const {
path_list Paths;
- SmallString<128> P(D.Dir);
- llvm::sys::path::append(P, "..", "lib", getTripleString());
- Paths.push_back(std::string(P.str()));
-
+ auto addPathForTriple = [this, &Paths](const llvm::Triple &Triple) {
+ SmallString<128> P(D.Dir);
+ llvm::sys::path::append(P, "..", "lib", Triple.str());
+ Paths.push_back(std::string(P.str()));
+ };
+ fillPaths(*this, addPathForTriple);
return Paths;
}


@@ -1,28 +0,0 @@
{
"patches": [
"find_symbolizer_linux_clang_15.patch",
"android-mangling-error_clang_12.patch",
"unpoison-thread-stacks_clang_10.patch",
"downgrade-mangling-error_clang_12.patch",
"fuzzing_ccov_build_clang_12.patch",
"partial-revert-llvmorg-16-init-17151-gaa0883b59ae1.patch",
"revert-llvmorg-16-init-11301-g163bb6d64e5f.patch",
"revert-llvmorg-15-init-13446-g7524fe962e47.patch",
"revert-llvmorg-15-init-11205-gcead4eceb01b_clang_16.patch",
"revert-llvmorg-14-init-14141-gd6d3000a2f6d.patch",
"revert-llvmorg-14-init-11890-gf86deb18cab6_clang_16.patch",
"llvmorg-17-init-237-g1b9fbc81ff15.patch",
"llvmorg-17-init-994-g1e72920c8859.patch",
"llvmorg-17-init-1242-g5de5f66b984a.patch",
"llvmorg-17-init-2171-g8198f30f7e75.patch",
"llvmorg-17-init-4170-g5c602c46b1ef.patch",
"llvmorg-17-init-6897-g415b1cfd57de.patch",
"llvmorg-17-init-6905-gc81a322476a1.patch",
"llvmorg-17-init-6909-gd644ab022a7b.patch",
"llvmorg-17-init-8140-gb1bd52cd0d86.patch",
"llvmorg-17-init-13415-g57dc16fbe307.patch",
"D146664_clang_15.patch",
"win64-ret-null-on-commitment-limit_clang_14.patch",
"compiler-rt-rss-limit-heap-profile.patch"
]
}


@@ -1,317 +0,0 @@
From beb699370963cb347f636435efc8409219c58f5f Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn@arm.com>
Date: Mon, 30 Jan 2023 14:34:14 +0000
Subject: [PATCH] [extract_symbols.py] Better handling of templates
Since commit 846b676 SmallVectorBase<uint32_t> has been explicitly
instantiated, which means that clang.exe must export it for a plugin
to be able to link against it, but the constructor is not exported as
currently no template constructors or destructors are exported.
We can't just export all constructors and destructors, as that puts us
over the symbol limit on Windows, so instead rewrite how we decide
which templates need to be exported to be more precise. Currently we
assume that templates instantiated many times have no explicit
instantiations, but this isn't necessarily true and results also in
exporting implicit template instantiations that we don't need
to. Instead check for references to template members, as this
indicates that the template must be explicitly instantiated (as if it
weren't the template would just be implicitly instantiated on use).
Doing this reduces the number of symbols exported from clang from
66011 to 53993 (in the build configuration that I've been testing). It
also lets us get rid of the special-case handling of Type::getAs, as
its explicit instantiations are now being detected as such.
Differential Revision: https://reviews.llvm.org/D142989
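For context, a minimal illustration of the distinction the script now keys on (hypothetical example, not part of the patch):

#include "llvm/ADT/SmallVector.h"
#include <cstdint>

// Explicit instantiation: the member definitions live in this library, so
// objects that use them elsewhere contain undefined references, and the
// symbols must be exported for plugins to link against.
template class llvm::SmallVectorBase<uint32_t>;

// Implicit instantiation: the members are instantiated from the header in
// each translation unit that uses them, so nothing needs exporting.
void use() {
  llvm::SmallVector<int, 4> V;
  V.push_back(42);
}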
---
llvm/utils/extract_symbols.py | 200 ++++++++++++++++++----------------
1 file changed, 104 insertions(+), 96 deletions(-)
diff --git a/llvm/utils/extract_symbols.py b/llvm/utils/extract_symbols.py
index 298ee6ba4eeb..f64e3d1eebb9 100755
--- a/llvm/utils/extract_symbols.py
+++ b/llvm/utils/extract_symbols.py
@@ -23,11 +23,11 @@ import subprocess
import multiprocessing
import argparse
-# Define functions which extract a list of symbols from a library using several
-# different tools. We use subprocess.Popen and yield a symbol at a time instead
-# of using subprocess.check_output and returning a list as, especially on
-# Windows, waiting for the entire output to be ready can take a significant
-# amount of time.
+# Define functions which extract a list of pairs of (symbols, is_def) from a
+# library using several different tools. We use subprocess.Popen and yield a
+# symbol at a time instead of using subprocess.check_output and returning a list
+# as, especially on Windows, waiting for the entire output to be ready can take
+# a significant amount of time.
def dumpbin_get_symbols(lib):
process = subprocess.Popen(['dumpbin','/symbols',lib], bufsize=1,
@@ -35,10 +35,10 @@ def dumpbin_get_symbols(lib):
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
- # Look for external symbols that are defined in some section
- match = re.match("^.+SECT.+External\s+\|\s+(\S+).*$", line)
+ # Look for external symbols
+ match = re.match("^.+(SECT|UNDEF).+External\s+\|\s+(\S+).*$", line)
if match:
- yield match.group(1)
+ yield (match.group(2), match.group(1) != "UNDEF")
process.wait()
def nm_get_symbols(lib):
@@ -60,7 +60,11 @@ def nm_get_symbols(lib):
# but \s+ match newline also, so \s+\S* will match the optional size field.
match = re.match("^(\S+)\s+[BDGRSTVW]\s+\S+\s+\S*$", line)
if match:
- yield match.group(1)
+ yield (match.group(1), True)
+ # Look for undefined symbols, which have only name and type (which is U).
+ match = re.match("^(\S+)\s+U\s+$", line)
+ if match:
+ yield (match.group(1), False)
process.wait()
def readobj_get_symbols(lib):
@@ -71,7 +75,7 @@ def readobj_get_symbols(lib):
for line in process.stdout:
# When looking through the output of llvm-readobj we expect to see Name,
# Section, then StorageClass, so record Name and Section when we see
- # them and decide if this is a defined external symbol when we see
+ # them and decide if this is an external symbol when we see
# StorageClass.
match = re.search('Name: (\S+)', line)
if match:
@@ -83,9 +87,8 @@ def readobj_get_symbols(lib):
if match:
storageclass = match.group(1)
if section != 'IMAGE_SYM_ABSOLUTE' and \
- section != 'IMAGE_SYM_UNDEFINED' and \
storageclass == 'External':
- yield name
+ yield (name, section != 'IMAGE_SYM_UNDEFINED')
process.wait()
# Define functions which determine if the target is 32-bit Windows (as that's
@@ -146,23 +149,11 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
if symbol.startswith(("__xmm@", "__ymm@", "__real@")):
return None
return symbol
- # Function template instantiations start with ?$; keep the instantiations of
- # clang::Type::getAs, as some of them are explicit specializations that are
- # defined in clang's lib/AST/Type.cpp; discard the rest as it's assumed that
- # the definition is public
- elif re.match('\?\?\$getAs@.+@Type@clang@@', symbol):
- return symbol
- elif symbol.startswith('??$'):
- return None
# Deleting destructors start with ?_G or ?_E and can be discarded because
# link.exe gives you a warning telling you they can't be exported if you
# don't
elif symbol.startswith('??_G') or symbol.startswith('??_E'):
return None
- # Constructors (?0) and destructors (?1) of templates (?$) are assumed to be
- # defined in headers and not required to be kept
- elif symbol.startswith('??0?$') or symbol.startswith('??1?$'):
- return None
# An anonymous namespace is mangled as ?A(maybe hex number)@. Any symbol
# that mentions an anonymous namespace can be discarded, as the anonymous
# namespace doesn't exist outside of that translation unit.
@@ -216,18 +207,6 @@ def should_keep_itanium_symbol(symbol, calling_convention_decoration):
return None
if not names:
return symbol
- # Constructors and destructors of templates classes are assumed to be
- # defined in headers and not required to be kept
- if re.match('[CD][123]', names[-1][0]) and names[-2][1]:
- return None
- # Keep the instantiations of clang::Type::getAs, as some of them are
- # explicit specializations that are defined in clang's lib/AST/Type.cpp;
- # discard any other function template instantiations as it's assumed that
- # the definition is public
- elif symbol.startswith('_ZNK5clang4Type5getAs'):
- return symbol
- elif names[-1][1]:
- return None
# Keep llvm:: and clang:: names
elif names[0][0] == '4llvm' or names[0][0] == '5clang':
return symbol
@@ -338,14 +317,79 @@ def parse_itanium_nested_name(arg):
# If we get here then something went wrong
return None, None
+# Parse a microsoft mangled symbol and return a list of pairs of
+# (name, is_template). This is very rudimentary and does just enough
+# in order to determine if the first or second component is a template.
+def parse_microsoft_mangling(arg):
+ # If the name doesn't start with ? this isn't a mangled name
+ if not arg.startswith('?'):
+ return [(arg, False)]
+ arg = arg[1:]
+ components = []
+ while len(arg) > 0:
+ # If we see an empty component we've reached the end
+ if arg.startswith('@'):
+ return components
+ # Check for a simple name
+ match = re.match('(\w+)@(.+)', arg)
+ if match:
+ components.append((match.group(1), False))
+ arg = match.group(2)
+ continue
+ # Check for a special function name
+ match = re.match('(\?_?\w)(.+)', arg)
+ if match:
+ components.append((match.group(1), False))
+ arg = match.group(2)
+ continue
+ # Check for a template name
+ match = re.match('\?\$(\w+)@[^@]+@(.+)', arg)
+ if match:
+ components.append((match.group(1), True))
+ arg = match.group(2)
+ continue
+ # Some other kind of name that we can't handle
+ components.append((arg, False))
+ return components
+ return components
+
def extract_symbols(arg):
get_symbols, should_keep_symbol, calling_convention_decoration, lib = arg
- symbols = dict()
- for symbol in get_symbols(lib):
+ symbol_defs = dict()
+ symbol_refs = set()
+ for (symbol, is_def) in get_symbols(lib):
symbol = should_keep_symbol(symbol, calling_convention_decoration)
if symbol:
- symbols[symbol] = 1 + symbols.setdefault(symbol,0)
- return symbols
+ if is_def:
+ symbol_defs[symbol] = 1 + symbol_defs.setdefault(symbol,0)
+ else:
+ symbol_refs.add(symbol)
+ return (symbol_defs, symbol_refs)
+
+def get_template_name(sym, mangling):
+ # Parse the mangling into a list of (name, is_template)
+ try:
+ if mangling == 'microsoft':
+ names = parse_microsoft_mangling(sym)
+ else:
+ match = re.match('_Z(T[VTIS])?(N.+)', sym)
+ if match:
+ names, _ = parse_itanium_nested_name(match.group(2))
+ else:
+ names = None
+ except TooComplexName:
+ return None
+
+ if not names:
+ return None
+
+ # If any component is a template then return it
+ for name, is_template in names:
+ if is_template:
+ return name
+
+ # Not a template
+ return None
if __name__ == '__main__':
tool_exes = ['dumpbin','nm','objdump','llvm-readobj']
@@ -458,68 +502,32 @@ if __name__ == '__main__':
exit(1)
# Merge everything into a single dict
- symbols = dict()
- for this_lib_symbols in libs_symbols:
- for k,v in list(this_lib_symbols.items()):
- symbols[k] = v + symbols.setdefault(k,0)
-
- # Count instances of member functions of template classes, and map the
- # symbol name to the function+class. We do this under the assumption that if
- # a member function of a template class is instantiated many times it's
- # probably declared in a public header file.
- template_function_count = dict()
- template_function_mapping = dict()
- template_function_count[""] = 0
- for k in symbols:
- name = None
- if args.mangling == 'microsoft':
- # Member functions of templates start with
- # ?<fn_name>@?$<class_name>@, so we map to <fn_name>@?$<class_name>.
- # As manglings go from the innermost scope to the outermost scope
- # this means:
- # * When we have a function member of a subclass of a template
- # class then <fn_name> will actually contain the mangling of
- # both the subclass and the function member. This is fine.
- # * When we have a function member of a template subclass of a
- # (possibly template) class then it's the innermost template
- # subclass that becomes <class_name>. This should be OK so long
- # as we don't have multiple classes with a template subclass of
- # the same name.
- match = re.search("^\?(\??\w+\@\?\$\w+)\@", k)
- if match:
- name = match.group(1)
- else:
- # Find member functions of templates by demangling the name and
- # checking if the second-to-last name in the list is a template.
- match = re.match('_Z(T[VTIS])?(N.+)', k)
- if match:
- try:
- names, _ = parse_itanium_nested_name(match.group(2))
- if names and names[-2][1]:
- name = ''.join([x for x,_ in names])
- except TooComplexName:
- # Manglings that are too complex should already have been
- # filtered out, but if we happen to somehow see one here
- # just leave it as-is.
- pass
- if name:
- old_count = template_function_count.setdefault(name,0)
- template_function_count[name] = old_count + 1
- template_function_mapping[k] = name
- else:
- template_function_mapping[k] = ""
+ symbol_defs = dict()
+ symbol_refs = set()
+ for (this_lib_defs, this_lib_refs) in libs_symbols:
+ for k,v in list(this_lib_defs.items()):
+ symbol_defs[k] = v + symbol_defs.setdefault(k,0)
+ for sym in list(this_lib_refs):
+ symbol_refs.add(sym)
+
+ # Find which template instantiations are referenced at least once.
+ template_instantiation_refs = set()
+ for sym in list(symbol_refs):
+ template = get_template_name(sym, args.mangling)
+ if template:
+ template_instantiation_refs.add(template)
# Print symbols which both:
# * Appear in exactly one input, as symbols defined in multiple
# objects/libraries are assumed to have public definitions.
- # * Aren't instances of member functions of templates which have been
- # instantiated 100 times or more, which are assumed to have public
- # definitions. (100 is an arbitrary guess here.)
+ # * Are not a template instantiation that isn't referenced anywhere. This
+ # is because we need to export any explicitly instantiated templates,
+ # and we expect those to be referenced in some object.
if args.o:
outfile = open(args.o,'w')
else:
outfile = sys.stdout
- for k,v in list(symbols.items()):
- template_count = template_function_count[template_function_mapping[k]]
- if v == 1 and template_count < 100:
+ for k,v in list(symbol_defs.items()):
+ template = get_template_name(k, args.mangling)
+ if v == 1 and (not template or template in template_instantiation_refs):
print(k, file=outfile)
--
2.39.0.1.g6739ec1790


@@ -1,308 +0,0 @@
From 57dc16fbe3078b4f27e804a2a3a6594e46897c3c Mon Sep 17 00:00:00 2001
From: Mike Hommey <mh@glandium.org>
Date: Sat, 3 Jun 2023 09:31:42 +0200
Subject: [PATCH] [llvm] Strip stabs symbols in Mach-O when stripping debug
info
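The added check keys on the stabs bits of the symbol type; as a standalone sketch (assuming the standard Mach-O N_STAB mask, 0xe0):

#include <cstdint>

// An nlist entry is a stabs (debug) entry when any of the top three n_type
// bits are set; with this patch, --strip-debug drops those entries too.
constexpr uint8_t N_STAB_MASK = 0xe0; // MachO::N_STAB
static bool isStabSymbol(uint8_t n_type) {
  return (n_type & N_STAB_MASK) != 0;
}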
---
llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp | 3 +
.../MachO/Inputs/strip-stabs.yaml | 248 ++++++++++++++++++
.../tools/llvm-objcopy/MachO/strip-stabs.test | 17 ++
3 files changed, 268 insertions(+)
create mode 100644 llvm/test/tools/llvm-objcopy/MachO/Inputs/strip-stabs.yaml
create mode 100644 llvm/test/tools/llvm-objcopy/MachO/strip-stabs.test
diff --git a/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp b/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp
index d37241682efe..e26b363df21c 100644
--- a/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp
+++ b/llvm/lib/ObjCopy/MachO/MachOObjcopy.cpp
@@ -112,6 +112,9 @@ static void updateAndRemoveSymbols(const CommonConfig &Config,
if (Config.DiscardMode == DiscardType::All && !(N->n_type & MachO::N_EXT))
return true;
// This behavior is consistent with cctools' strip.
+ if (Config.StripDebug && (N->n_type & MachO::N_STAB))
+ return true;
+ // This behavior is consistent with cctools' strip.
if (MachOConfig.StripSwiftSymbols &&
(Obj.Header.Flags & MachO::MH_DYLDLINK) && Obj.SwiftVersion &&
*Obj.SwiftVersion && N->isSwiftSymbol())
diff --git a/llvm/test/tools/llvm-objcopy/MachO/Inputs/strip-stabs.yaml b/llvm/test/tools/llvm-objcopy/MachO/Inputs/strip-stabs.yaml
new file mode 100644
index 000000000000..3259aa228fed
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/MachO/Inputs/strip-stabs.yaml
@@ -0,0 +1,248 @@
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x1000007
+ cpusubtype: 0x80000003
+ filetype: 0x2
+ ncmds: 13
+ sizeofcmds: 808
+ flags: 0x200085
+ reserved: 0x0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __PAGEZERO
+ vmaddr: 0
+ vmsize: 4294967296
+ fileoff: 0
+ filesize: 0
+ maxprot: 0
+ initprot: 0
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 312
+ segname: __TEXT
+ vmaddr: 4294967296
+ vmsize: 8192
+ fileoff: 0
+ filesize: 8192
+ maxprot: 5
+ initprot: 5
+ nsects: 3
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x100000370
+ size: 8
+ offset: 0x370
+ align: 4
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 554889E531C05DC3
+ - sectname: __unwind_info
+ segname: __TEXT
+ addr: 0x100000378
+ size: 4152
+ offset: 0x378
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ - sectname: __eh_frame
+ segname: __TEXT
+ addr: 0x1000013B0
+ size: 24
+ offset: 0x13B0
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x6000000B
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 1400000000000000017A520001781001100C070890010000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 4294975488
+ vmsize: 272
+ fileoff: 8192
+ filesize: 272
+ maxprot: 1
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_DYLD_INFO_ONLY
+ cmdsize: 48
+ rebase_off: 0
+ rebase_size: 0
+ bind_off: 0
+ bind_size: 0
+ weak_bind_off: 0
+ weak_bind_size: 0
+ lazy_bind_off: 0
+ lazy_bind_size: 0
+ export_off: 8192
+ export_size: 48
+ - cmd: LC_SYMTAB
+ cmdsize: 24
+ symoff: 8248
+ nsyms: 8
+ stroff: 8376
+ strsize: 88
+ - cmd: LC_DYSYMTAB
+ cmdsize: 80
+ ilocalsym: 0
+ nlocalsym: 5
+ iextdefsym: 5
+ nextdefsym: 2
+ iundefsym: 7
+ nundefsym: 1
+ tocoff: 0
+ ntoc: 0
+ modtaboff: 0
+ nmodtab: 0
+ extrefsymoff: 0
+ nextrefsyms: 0
+ indirectsymoff: 0
+ nindirectsyms: 0
+ extreloff: 0
+ nextrel: 0
+ locreloff: 0
+ nlocrel: 0
+ - cmd: LC_LOAD_DYLINKER
+ cmdsize: 32
+ name: 12
+ Content: '/usr/lib/dyld'
+ ZeroPadBytes: 7
+ - cmd: LC_UUID
+ cmdsize: 24
+ uuid: 4C4C44DE-5555-3144-A19D-79B149A02D5F
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 32
+ platform: 1
+ minos: 852736
+ sdk: 852736
+ ntools: 1
+ Tools:
+ - tool: 3
+ version: 1048580
+ - cmd: LC_MAIN
+ cmdsize: 24
+ entryoff: 880
+ stacksize: 0
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 56
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 86467587
+ compatibility_version: 65536
+ Content: '/usr/lib/libSystem.B.dylib'
+ ZeroPadBytes: 6
+ - cmd: LC_FUNCTION_STARTS
+ cmdsize: 16
+ dataoff: 8240
+ datasize: 8
+ - cmd: LC_DATA_IN_CODE
+ cmdsize: 16
+ dataoff: 8248
+ datasize: 0
+LinkEditData:
+ ExportTrie:
+ TerminalSize: 0
+ NodeOffset: 0
+ Name: ''
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 5
+ Name: _
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 33
+ Name: main
+ Flags: 0x0
+ Address: 0x370
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 2
+ NodeOffset: 38
+ Name: _mh_execute_header
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ NameList:
+ - n_strx: 45
+ n_type: 0x64
+ n_sect: 0
+ n_desc: 0
+ n_value: 0
+ - n_strx: 57
+ n_type: 0x66
+ n_sect: 3
+ n_desc: 1
+ n_value: 0
+ - n_strx: 76
+ n_type: 0x24
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968176
+ - n_strx: 1
+ n_type: 0x24
+ n_sect: 0
+ n_desc: 0
+ n_value: 8
+ - n_strx: 1
+ n_type: 0x64
+ n_sect: 1
+ n_desc: 0
+ n_value: 0
+ - n_strx: 2
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968176
+ - n_strx: 25
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 16
+ n_value: 4294967296
+ - n_strx: 8
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 256
+ n_value: 0
+ StringTable:
+ - ' '
+ - _main
+ - dyld_stub_binder
+ - __mh_execute_header
+ - '/tmp/test.c'
+ - '/tmp/test-6aa924.o'
+ - _main
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ FunctionStarts: [ 0x370 ]
+...
diff --git a/llvm/test/tools/llvm-objcopy/MachO/strip-stabs.test b/llvm/test/tools/llvm-objcopy/MachO/strip-stabs.test
new file mode 100644
index 000000000000..90c00f60a152
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/MachO/strip-stabs.test
@@ -0,0 +1,17 @@
+## Show that llvm-objcopy/llvm-strip strips stabs symbols and debug sections.
+
+# RUN: yaml2obj %p/Inputs/strip-stabs.yaml -o %t
+
+# RUN: llvm-objcopy --strip-debug %t %t.stripped
+
+## Make sure that stabs symbols are stripped.
+# RUN: llvm-readobj --symbols %t | FileCheck %s --check-prefix=SYM
+# RUN: llvm-readobj --symbols %t.stripped | FileCheck %s --check-prefix=SYM_STRIP
+
+# SYM: Symbols [
+# SYM-COUNT-5: Type: SymDebugTable ({{.*}})
+# SYM: ]
+
+# SYM_STRIP: Symbols [
+# SYM_STRIP-NOT: Type: SymDebugTable ({{.*}})
+# SYM_STRIP: ]
--
2.41.0.6.ge371d37104


@@ -1,99 +0,0 @@
From 8198f30f7e756e3368c3eda62ecc3d0cc62d1570 Mon Sep 17 00:00:00 2001
From: Jez Ng <jezng@fb.com>
Date: Tue, 14 Feb 2023 14:34:19 -0500
Subject: [PATCH] [lld-macho] Account for alignment in thunk insertion
algorithm
We previously neglected this, leading us to underestimate the maximum
possible branch address offset.
Fixing this should allow us to reduce `slop` to more reasonable
levels. I've lowered it to 256 for now, though I suspect we could go
lower.
Fixes https://github.com/llvm/llvm-project/issues/59259.
Reviewed By: serge-sans-paille
Differential Revision: https://reviews.llvm.org/D144029
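A worked sketch of the new estimate with made-up but representative numbers (on arm64 the forward branch range is 0x8000000 bytes and thunkSize is 12):

#include "llvm/Support/MathExtras.h"
#include <cstdint>

uint64_t addr = 0x1000, size = 0x3ff9000; // code placed so far (hypothetical)
uint64_t nextSize = 0x4000000;            // size of the next input section
uint32_t nextAlign = 1 << 14;             // its required alignment
// The old estimate ignored alignment and could stay just under the range:
uint64_t naive = addr + size + nextSize;
// The new estimate rounds up first, as layout will:
uint64_t expected = llvm::alignTo(addr + size, nextAlign) + nextSize;
// Thunks are inserted once expected crosses
// isecVA + forwardBranchRange (0x8000000) - slop (256 * 12).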
---
lld/MachO/ConcatOutputSection.cpp | 10 +++--
lld/test/MachO/arm64-thunk-for-alignment.s | 44 ++++++++++++++++++++++
2 files changed, 51 insertions(+), 3 deletions(-)
create mode 100644 lld/test/MachO/arm64-thunk-for-alignment.s
diff --git a/lld/MachO/ConcatOutputSection.cpp b/lld/MachO/ConcatOutputSection.cpp
index cbd3a2492d25..b522bd9b289e 100644
--- a/lld/MachO/ConcatOutputSection.cpp
+++ b/lld/MachO/ConcatOutputSection.cpp
@@ -246,10 +246,14 @@ void TextOutputSection::finalize() {
// contains several branch instructions in succession, then the distance
// from the current position to the position where the thunks are inserted
// grows. So leave room for a bunch of thunks.
- unsigned slop = 1024 * thunkSize;
- while (finalIdx < endIdx && addr + size + inputs[finalIdx]->getSize() <
- isecVA + forwardBranchRange - slop)
+ unsigned slop = 256 * thunkSize;
+ while (finalIdx < endIdx) {
+ size_t expectedNewSize = alignTo(addr + size, inputs[finalIdx]->align) +
+ inputs[finalIdx]->getSize();
+ if (expectedNewSize >= isecVA + forwardBranchRange - slop)
+ break;
finalizeOne(inputs[finalIdx++]);
+ }
if (!isec->hasCallSites)
continue;
diff --git a/lld/test/MachO/arm64-thunk-for-alignment.s b/lld/test/MachO/arm64-thunk-for-alignment.s
new file mode 100644
index 000000000000..f497b81f705b
--- /dev/null
+++ b/lld/test/MachO/arm64-thunk-for-alignment.s
@@ -0,0 +1,44 @@
+# REQUIRES: aarch64
+# RUN: rm -rf %t; split-file %s %t
+# RUN: llvm-mc -filetype=obj -triple=arm64-apple-darwin %t/foo.s -o %t/foo.o
+# RUN: llvm-mc -filetype=obj -triple=arm64-apple-darwin %t/bar.s -o %t/bar.o
+# RUN: %lld -dylib -arch arm64 -lSystem -o %t/out %t/foo.o %t/bar.o
+
+# RUN: llvm-objdump --macho --syms %t/out | FileCheck %s
+# CHECK: _bar.thunk.0
+
+## Regression test for PR59259. Previously, we neglected to check section
+## alignments when deciding when to create thunks.
+
+## If we ignore alignment, the total size of _spacer1 + _spacer2 below is just
+## under the limit at which we attempt to insert thunks between the spacers.
+## However, with alignment accounted for, their total size ends up being
+## 0x8000000, which is just above the max forward branch range, making thunk
+## insertion necessary. Thus, not accounting for alignment led to an error.
+
+#--- foo.s
+
+_foo:
+ b _bar
+
+## Size of a `b` instruction.
+.equ callSize, 4
+## Refer to `slop` in TextOutputSection::finalize().
+.equ slopSize, 12 * 256
+
+_spacer1:
+ .space 0x4000000 - slopSize - 2 * callSize - 1
+
+.subsections_via_symbols
+
+#--- bar.s
+.globl _bar
+
+.p2align 14
+_spacer2:
+ .space 0x4000000
+
+_bar:
+ ret
+
+.subsections_via_symbols
--
2.39.0.1.g6739ec1790


@@ -1,45 +0,0 @@
From 1b9fbc81ff15f6ad5a0e7f29c486c6edd0bce94c Mon Sep 17 00:00:00 2001
From: Mike Hommey <mh@glandium.org>
Date: Thu, 26 Jan 2023 21:28:09 +0100
Subject: [PATCH] [extract_symbols.py] Filter out more symbols for MSVC
This strips out about 5k symbols.
Fixes https://github.com/llvm/llvm-project/issues/60109
Reviewed By: john.brawn
Differential Revision: https://reviews.llvm.org/D142431
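Representative examples of the constant-pool symbols being discarded (values illustrative, not from the patch):

# __real@3ff0000000000000 encodes the double constant 1.0
# __xmm@<32 hex digits> and __ymm@<64 hex digits> encode vector literals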
---
llvm/utils/extract_symbols.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/llvm/utils/extract_symbols.py b/llvm/utils/extract_symbols.py
index 0f8e8ba64c80..298ee6ba4eeb 100755
--- a/llvm/utils/extract_symbols.py
+++ b/llvm/utils/extract_symbols.py
@@ -141,7 +141,10 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
# Remove calling convention decoration from names
match = re.match('[_@]([^@]+)', symbol)
if match:
- return match.group(1)
+ symbol = match.group(1)
+ # Discard floating point/SIMD constants.
+ if symbol.startswith(("__xmm@", "__ymm@", "__real@")):
+ return None
return symbol
# Function template instantiations start with ?$; keep the instantiations of
# clang::Type::getAs, as some of them are explicit specializations that are
@@ -165,6 +168,9 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
# namespace doesn't exist outside of that translation unit.
elif re.search('\?A(0x\w+)?@', symbol):
return None
+ # Skip X86GenMnemonicTables functions, they are not exposed from llvm/include/.
+ elif re.match('\?is[A-Z0-9]*@X86@llvm', symbol):
+ return None
# Keep mangled llvm:: and clang:: function symbols. How we detect these is a
# bit of a mess and imprecise, but that avoids having to completely demangle
# the symbol name. The outermost namespace is at the end of the identifier
--
2.39.0.1.g6739ec1790


@@ -1,98 +0,0 @@
From 39e048e60ae2153f7621b7d1a1949dcb69778fa1 Mon Sep 17 00:00:00 2001
From: Michael Platings <michael.platings@arm.com>
Date: Mon, 6 Mar 2023 22:53:54 +0000
Subject: [PATCH] Use LLVM_USE_SYMLINKS option in install_symlink
The change to potentially use symlinks on Windows was added in
https://reviews.llvm.org/D99170.
LLVM_USE_SYMLINKS was added more recently in
https://reviews.llvm.org/D135578 and allows specifying at configure time
whether or not symlinks should be created. The benefit of using this
option is it allows building the package on a symlink-capable Windows
machine with symlinks disabled so that the resulting package can be used
on a Windows machine that doesn't support symlinks.
Differential Revision: https://reviews.llvm.org/D145443
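For example, a packager on a symlink-capable machine could configure with cmake -DLLVM_USE_SYMLINKS=OFF so that installed tool aliases (e.g. clang++) become plain copies, usable on hosts without symlink support.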
---
llvm/cmake/modules/AddLLVM.cmake | 16 ++++++++++++++--
llvm/cmake/modules/LLVMInstallSymlink.cmake | 14 ++++++--------
2 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/llvm/cmake/modules/AddLLVM.cmake b/llvm/cmake/modules/AddLLVM.cmake
index 76695e69e740..fa23bf1d883a 100644
--- a/llvm/cmake/modules/AddLLVM.cmake
+++ b/llvm/cmake/modules/AddLLVM.cmake
@@ -2008,13 +2008,19 @@ function(llvm_install_library_symlink name dest type)
set(full_name ${CMAKE_${type}_LIBRARY_PREFIX}${name}${CMAKE_${type}_LIBRARY_SUFFIX})
set(full_dest ${CMAKE_${type}_LIBRARY_PREFIX}${dest}${CMAKE_${type}_LIBRARY_SUFFIX})
+ if(LLVM_USE_SYMLINKS)
+ set(LLVM_LINK_OR_COPY create_symlink)
+ else()
+ set(LLVM_LINK_OR_COPY copy)
+ endif()
+
set(output_dir lib${LLVM_LIBDIR_SUFFIX})
if(WIN32 AND "${type}" STREQUAL "SHARED")
set(output_dir "${CMAKE_INSTALL_BINDIR}")
endif()
install(SCRIPT ${INSTALL_SYMLINK}
- CODE "install_symlink(\"${full_name}\" \"${full_dest}\" \"${output_dir}\")"
+ CODE "install_symlink(\"${full_name}\" \"${full_dest}\" \"${output_dir}\" \"${LLVM_LINK_OR_COPY}\")"
COMPONENT ${component})
endfunction()
@@ -2049,10 +2055,16 @@ function(llvm_install_symlink project name dest)
set(full_dest llvm${CMAKE_EXECUTABLE_SUFFIX})
endif()
+ if(LLVM_USE_SYMLINKS)
+ set(LLVM_LINK_OR_COPY create_symlink)
+ else()
+ set(LLVM_LINK_OR_COPY copy)
+ endif()
+
set(output_dir "${${project}_TOOLS_INSTALL_DIR}")
install(SCRIPT ${INSTALL_SYMLINK}
- CODE "install_symlink(\"${full_name}\" \"${full_dest}\" \"${output_dir}\")"
+ CODE "install_symlink(\"${full_name}\" \"${full_dest}\" \"${output_dir}\" \"${LLVM_LINK_OR_COPY}\")"
COMPONENT ${component})
if (NOT LLVM_ENABLE_IDE AND NOT ARG_ALWAYS_GENERATE)
diff --git a/llvm/cmake/modules/LLVMInstallSymlink.cmake b/llvm/cmake/modules/LLVMInstallSymlink.cmake
index e9be04aceb3d..fb61265543d1 100644
--- a/llvm/cmake/modules/LLVMInstallSymlink.cmake
+++ b/llvm/cmake/modules/LLVMInstallSymlink.cmake
@@ -4,7 +4,10 @@
set(CMAKE_INSTALL_LIBDIR "lib")
include(GNUInstallDirs)
-function(install_symlink name target outdir)
+function(install_symlink name target outdir link_or_copy)
+ # link_or_copy is the "command" to pass to cmake -E.
+ # It should be either "create_symlink" or "copy".
+
set(DESTDIR $ENV{DESTDIR})
if(NOT IS_ABSOLUTE "${outdir}")
set(outdir "${CMAKE_INSTALL_PREFIX}/${outdir}")
@@ -14,12 +17,7 @@ function(install_symlink name target outdir)
message(STATUS "Creating ${name}")
execute_process(
- COMMAND "${CMAKE_COMMAND}" -E create_symlink "${target}" "${name}"
- WORKING_DIRECTORY "${outdir}" ERROR_VARIABLE has_err)
- if(CMAKE_HOST_WIN32 AND has_err)
- execute_process(
- COMMAND "${CMAKE_COMMAND}" -E copy "${target}" "${name}"
- WORKING_DIRECTORY "${outdir}")
- endif()
+ COMMAND "${CMAKE_COMMAND}" -E ${link_or_copy} "${target}" "${name}"
+ WORKING_DIRECTORY "${outdir}")
endfunction()
--
2.39.0.1.g6739ec1790


@@ -1,395 +0,0 @@
From 415b1cfd57de62da8af9ad8dc567fc9d918dbaa5 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston@google.com>
Date: Mon, 3 Apr 2023 21:14:40 +0000
Subject: [PATCH] Add __sanitizer_get_allocated_begin API and implementations
This function returns the start of the allocation if given a pointer that lies within an allocation; otherwise, it returns NULL.
It will be useful for detecting dynamic TLS allocations in glibc >=2.25, which
uses malloc (see https://github.com/google/sanitizers/issues/1409#issuecomment-1214244142).
Reviewed By: vitalybuka
Differential Revision: https://reviews.llvm.org/D147005
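A minimal usage sketch, mirroring the test added below (runs under any sanitizer that implements the interface):

#include <assert.h>
#include <sanitizer/allocator_interface.h>
#include <stdlib.h>

int main(void) {
  char *p = (char *)malloc(100);
  // Any interior pointer maps back to the start of its allocation.
  assert(__sanitizer_get_allocated_begin(p + 42) == p);
  // Addresses the allocator does not own yield NULL.
  assert(__sanitizer_get_allocated_begin((void *)4096) == NULL);
  free(p);
  return 0;
}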
---
.../include/sanitizer/allocator_interface.h | 4 ++
compiler-rt/lib/asan/asan_allocator.cpp | 15 +++++
compiler-rt/lib/dfsan/dfsan_allocator.cpp | 18 ++++++
compiler-rt/lib/hwasan/hwasan_allocator.cpp | 21 +++++++
compiler-rt/lib/lsan/lsan_allocator.cpp | 21 +++++++
compiler-rt/lib/memprof/memprof_allocator.cpp | 16 +++++
compiler-rt/lib/msan/msan_allocator.cpp | 19 ++++++
.../sanitizer_allocator_interface.h | 2 +
.../sanitizer_allocator_internal.h | 3 +-
.../sanitizer_common_interface.inc | 1 +
compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 18 ++++++
.../TestCases/get_allocated_begin.cpp | 58 +++++++++++++++++++
12 files changed, 195 insertions(+), 1 deletion(-)
create mode 100644 compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp
diff --git a/compiler-rt/include/sanitizer/allocator_interface.h b/compiler-rt/include/sanitizer/allocator_interface.h
index 6226135ef84b3..d846f3f330741 100644
--- a/compiler-rt/include/sanitizer/allocator_interface.h
+++ b/compiler-rt/include/sanitizer/allocator_interface.h
@@ -26,6 +26,10 @@ extern "C" {
is not yet freed. */
int __sanitizer_get_ownership(const volatile void *p);
+ /* If a pointer lies within an allocation, it will return the start address
+ of the allocation. Otherwise, it returns nullptr. */
+ void *__sanitizer_get_allocated_begin(const void *p);
+
/* Returns the number of bytes reserved for the pointer p.
Requires (get_ownership(p) == true) or (p == 0). */
size_t __sanitizer_get_allocated_size(const volatile void *p);
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 4c52a45b875c7..4b65b44a88f91 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -1164,6 +1164,17 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
+void *AllocationBegin(const void *p) {
+ AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
+ if (!m)
+ return nullptr;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return nullptr;
+ if (m->UsedSize() == 0)
+ return nullptr;
+ return (void *)(m->Beg());
+}
+
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
@@ -1187,6 +1198,10 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return allocated_size;
}
+void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
void __sanitizer_purge_allocator() {
GET_STACK_TRACE_MALLOC;
instance.Purge(&stack);
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index 5fb8fef213b9a..cebf9983c9490 100644
--- a/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -174,6 +174,20 @@ void *DFsanCalloc(uptr nmemb, uptr size) {
return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
}
+void *AllocationBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ const void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (!b)
+ return nullptr;
+ if (b->requested_size == 0)
+ return nullptr;
+ return (void *)beg;
+}
+
static uptr AllocationSize(const void *p) {
if (!p)
return 0;
@@ -294,4 +308,8 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index d096a8faa2c7e..8ccdeb23fa995 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -397,6 +397,23 @@ HwasanChunkView FindHeapChunkByAddress(uptr address) {
return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
+void *AllocationBegin(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ if (!untagged_ptr)
+ return nullptr;
+
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ if (!beg)
+ return nullptr;
+
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (b->GetRequestedSize() == 0)
+ return nullptr;
+
+ tag_t tag = GetTagFromPointer((uptr)p);
+ return (void *)AddTagToPointer((uptr)beg, tag);
+}
+
static uptr AllocationSize(const void *tagged_ptr) {
const void *untagged_ptr = UntagPtr(tagged_ptr);
if (!untagged_ptr) return 0;
@@ -641,4 +658,8 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index 37ba363d479dd..d50882657dc33 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -145,6 +145,22 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(AllocatorCache);
}
+void *GetMallocBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ const void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ ChunkMetadata *m = Metadata(beg);
+ if (!m)
+ return nullptr;
+ if (!m->allocated)
+ return nullptr;
+ if (m->requested_size == 0)
+ return nullptr;
+ return (void *)beg;
+}
+
uptr GetMallocUsableSize(const void *p) {
if (!p)
return 0;
@@ -363,6 +379,11 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_get_allocated_begin(const void *p) {
+ return GetMallocBegin(p);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p);
diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
index 51c3a66ebd680..80a87d49dfc6e 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -681,6 +681,18 @@ int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
+void *memprof_malloc_begin(const void *p) {
+ u64 user_requested_size;
+ MemprofChunk *m =
+ instance.GetMemprofChunkByAddr((uptr)p, user_requested_size);
+ if (!m)
+ return nullptr;
+ if (user_requested_size == 0)
+ return nullptr;
+
+ return (void *)m->Beg();
+}
+
uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
if (!ptr)
return 0;
@@ -699,6 +711,10 @@ int __sanitizer_get_ownership(const void *p) {
return memprof_malloc_usable_size(p, 0, 0) != 0;
}
+void *__sanitizer_get_allocated_begin(const void *p) {
+ return memprof_malloc_begin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) {
return memprof_malloc_usable_size(p, 0, 0);
}
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index 3308ee7053a61..a760a434158a5 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -260,6 +260,21 @@ static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
+void *AllocationBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ const void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (!b)
+ return nullptr;
+ if (b->requested_size == 0)
+ return nullptr;
+
+ return (void *)beg;
+}
+
static uptr AllocationSize(const void *p) {
if (!p) return 0;
const void *beg = allocator.GetBlockBegin(p);
@@ -373,4 +388,8 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
index c1b27563e2fc7..35c7c97df3299 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -21,6 +21,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *
+__sanitizer_get_allocated_begin(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
index 38994736877ac..adbdad5a1ee0c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -51,7 +51,8 @@ void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
void InternalAllocatorLock();
void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();
-
+int __sanitizer_get_allocation_bounds(const void *p, void **start,
+ unsigned long long *size);
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
index 958f071e7b5f7..01be600e33ba3 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
@@ -32,6 +32,7 @@ INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_begin)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 99fa492265615..9c548dfff91f3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -352,6 +352,20 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
+void *user_alloc_begin(const void *p) {
+ if (p == nullptr || !IsAppMem((uptr)p))
+ return nullptr;
+ const void *beg = allocator()->GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+
+ MBlock *b = ctx->metamap.GetBlock((uptr)beg);
+ if (!b)
+ return nullptr; // Not a valid pointer.
+
+ return (void *)beg;
+}
+
uptr user_alloc_usable_size(const void *p) {
if (p == 0 || !IsAppMem((uptr)p))
return 0;
@@ -430,6 +444,10 @@ int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
+void *__sanitizer_get_allocated_begin(const void *p) {
+ return user_alloc_begin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) {
return user_alloc_usable_size(p);
}
diff --git a/compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp b/compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp
new file mode 100644
index 0000000000000..6892a4a7fb282
--- /dev/null
+++ b/compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp
@@ -0,0 +1,58 @@
+// RUN: %clangxx -O0 -g %s -o %t && %run %t
+
+// UBSan does not have its own allocator
+// UNSUPPORTED: ubsan
+
+#include <assert.h>
+#include <sanitizer/allocator_interface.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Based on lib/msan/tests/msan_test.cpp::get_allocated_size_and_ownership
+int main(void) {
+ int sizes[] = {10, 100, 1000, 10000, 100000, 1000000};
+
+ for (int i = 0; i < sizeof(sizes) / sizeof(int); i++) {
+ printf("Testing size %d\n", sizes[i]);
+
+ char *array = reinterpret_cast<char *>(malloc(sizes[i]));
+ int *int_ptr = new int;
+ printf("array: %p\n", array);
+ printf("int_ptr: %p\n", int_ptr);
+
+ // Bogus value to unpoison start. Calling __sanitizer_get_allocated_begin
+ // does not unpoison it.
+ void *start = NULL;
+ for (int j = 0; j < sizes[i]; j++) {
+ printf("j: %d\n", j);
+
+ start = __sanitizer_get_allocated_begin(array + j);
+ printf("Start: %p (expected: %p)\n", start, array);
+ fflush(stdout);
+ assert(array == start);
+ }
+
+ start = __sanitizer_get_allocated_begin(int_ptr);
+ assert(int_ptr == start);
+
+ void *wild_addr = reinterpret_cast<void *>(4096 * 160);
+ assert(__sanitizer_get_allocated_begin(wild_addr) == NULL);
+
+ wild_addr = reinterpret_cast<void *>(0x1);
+ assert(__sanitizer_get_allocated_begin(wild_addr) == NULL);
+
+ // NULL is a valid argument for GetAllocatedSize but is not owned.
+ assert(__sanitizer_get_allocated_begin(NULL) == NULL);
+
+ free(array);
+ for (int j = 0; j < sizes[i]; j++) {
+ assert(__sanitizer_get_allocated_begin(array + j) == NULL);
+ }
+
+ delete int_ptr;
+ assert(__sanitizer_get_allocated_begin(int_ptr) == NULL);
+ }
+
+ return 0;
+}


@@ -1,68 +0,0 @@
From c81a322476a1b1c57ca72832e10c43663557e097 Mon Sep 17 00:00:00 2001
From: Jie Fu <jiefu@tencent.com>
Date: Tue, 4 Apr 2023 07:40:34 +0800
Subject: [PATCH] [compiler-rt] Fix -Wcast-qual after D147005 (NFC)
/home/jiefu/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp:161:18: error: cast from 'const void *' to 'void *' drops const qualifier [-Werror,-Wcast-qual]
return (void *)beg;
^
1 error generated.
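The warning reduces to this pattern (minimal sketch; `allocator` stands in for the per-tool allocator object in the surrounding files):

// GetBlockBegin returns void *, so storing it as const void * forces a
// const-dropping cast on the way out:
void *bad(const void *p) {
  const void *beg = allocator.GetBlockBegin(p);
  return (void *)beg; // -Wcast-qual: cast drops const qualifier
}
// The fix keeps the pointer non-const from the start:
void *good(const void *p) {
  void *beg = allocator.GetBlockBegin(p);
  return beg;
}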
---
compiler-rt/lib/dfsan/dfsan_allocator.cpp | 2 +-
compiler-rt/lib/lsan/lsan_allocator.cpp | 2 +-
compiler-rt/lib/msan/msan_allocator.cpp | 2 +-
compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index cebf9983c9490..7ae6024fb2c9d 100644
--- a/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -177,7 +177,7 @@ void *DFsanCalloc(uptr nmemb, uptr size) {
void *AllocationBegin(const void *p) {
if (!p)
return nullptr;
- const void *beg = allocator.GetBlockBegin(p);
+ void *beg = allocator.GetBlockBegin(p);
if (!beg)
return nullptr;
Metadata *b = (Metadata *)allocator.GetMetaData(beg);
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index d50882657dc33..b0a54d7cd9bc5 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -148,7 +148,7 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
void *GetMallocBegin(const void *p) {
if (!p)
return nullptr;
- const void *beg = allocator.GetBlockBegin(p);
+ void *beg = allocator.GetBlockBegin(p);
if (!beg)
return nullptr;
ChunkMetadata *m = Metadata(beg);
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index a760a434158a5..08ec3314b26e6 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -263,7 +263,7 @@ static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
void *AllocationBegin(const void *p) {
if (!p)
return nullptr;
- const void *beg = allocator.GetBlockBegin(p);
+ void *beg = allocator.GetBlockBegin(p);
if (!beg)
return nullptr;
Metadata *b = (Metadata *)allocator.GetMetaData(beg);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 9c548dfff91f3..3cc4d16955ede 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -355,7 +355,7 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
void *user_alloc_begin(const void *p) {
if (p == nullptr || !IsAppMem((uptr)p))
return nullptr;
- const void *beg = allocator()->GetBlockBegin(p);
+ void *beg = allocator()->GetBlockBegin(p);
if (!beg)
return nullptr;


@@ -1,279 +0,0 @@
From d644ab022a7be985255db29fd466798e9b138bee Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston@google.com>
Date: Tue, 4 Apr 2023 00:42:37 +0000
Subject: [PATCH] Update __sanitizer_get_allocated_begin to return const void*
D147005 introduced __sanitizer_get_allocated_begin, with a return
value of void*. This involved a few naughty casts that dropped the
const. This patch adds back the const qualifier.
Differential Revision: https://reviews.llvm.org/D147489
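With the const-qualified return, a caller-side sketch looks like:

const void *begin = __sanitizer_get_allocated_begin(p);
// Mutating through the result now requires an explicit cast at the call
// site rather than a hidden const drop inside the runtime:
char *raw = (char *)begin;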
---
compiler-rt/include/sanitizer/allocator_interface.h | 2 +-
compiler-rt/lib/asan/asan_allocator.cpp | 6 +++---
compiler-rt/lib/dfsan/dfsan_allocator.cpp | 6 +++---
compiler-rt/lib/hwasan/hwasan_allocator.cpp | 6 +++---
compiler-rt/lib/lsan/lsan_allocator.cpp | 6 +++---
compiler-rt/lib/memprof/memprof_allocator.cpp | 6 +++---
compiler-rt/lib/msan/msan_allocator.cpp | 6 +++---
.../lib/sanitizer_common/sanitizer_allocator_interface.h | 2 +-
compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 6 +++---
.../test/sanitizer_common/TestCases/get_allocated_begin.cpp | 2 +-
10 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/compiler-rt/include/sanitizer/allocator_interface.h b/compiler-rt/include/sanitizer/allocator_interface.h
index d846f3f330741..d0cfce79c1aef 100644
--- a/compiler-rt/include/sanitizer/allocator_interface.h
+++ b/compiler-rt/include/sanitizer/allocator_interface.h
@@ -28,7 +28,7 @@ extern "C" {
/* If a pointer lies within an allocation, it will return the start address
of the allocation. Otherwise, it returns nullptr. */
- void *__sanitizer_get_allocated_begin(const void *p);
+ const void *__sanitizer_get_allocated_begin(const void *p);
/* Returns the number of bytes reserved for the pointer p.
Requires (get_ownership(p) == true) or (p == 0). */
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 4b65b44a88f91..708d975a93dcf 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -1164,7 +1164,7 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
-void *AllocationBegin(const void *p) {
+const void *AllocationBegin(const void *p) {
AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
if (!m)
return nullptr;
@@ -1172,7 +1172,7 @@ void *AllocationBegin(const void *p) {
return nullptr;
if (m->UsedSize() == 0)
return nullptr;
- return (void *)(m->Beg());
+ return (const void *)(m->Beg());
}
// ASan allocator doesn't reserve extra bytes, so normally we would
@@ -1198,7 +1198,7 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return allocated_size;
}
-void *__sanitizer_get_allocated_begin(const void *p) {
+const void *__sanitizer_get_allocated_begin(const void *p) {
return AllocationBegin(p);
}
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index 7ae6024fb2c9d..36346d163d982 100644
--- a/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -174,7 +174,7 @@ void *DFsanCalloc(uptr nmemb, uptr size) {
return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
}
-void *AllocationBegin(const void *p) {
+const void *AllocationBegin(const void *p) {
if (!p)
return nullptr;
void *beg = allocator.GetBlockBegin(p);
@@ -185,7 +185,7 @@ void *AllocationBegin(const void *p) {
return nullptr;
if (b->requested_size == 0)
return nullptr;
- return (void *)beg;
+ return (const void *)beg;
}
static uptr AllocationSize(const void *p) {
@@ -308,7 +308,7 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
-void *__sanitizer_get_allocated_begin(const void *p) {
+const void *__sanitizer_get_allocated_begin(const void *p) {
return AllocationBegin(p);
}
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 8ccdeb23fa995..994a580dc95e0 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -397,7 +397,7 @@ HwasanChunkView FindHeapChunkByAddress(uptr address) {
return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
-void *AllocationBegin(const void *p) {
+const void *AllocationBegin(const void *p) {
const void *untagged_ptr = UntagPtr(p);
if (!untagged_ptr)
return nullptr;
@@ -411,7 +411,7 @@ void *AllocationBegin(const void *p) {
return nullptr;
tag_t tag = GetTagFromPointer((uptr)p);
- return (void *)AddTagToPointer((uptr)beg, tag);
+ return (const void *)AddTagToPointer((uptr)beg, tag);
}
static uptr AllocationSize(const void *tagged_ptr) {
@@ -658,7 +658,7 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
-void *__sanitizer_get_allocated_begin(const void *p) {
+const void *__sanitizer_get_allocated_begin(const void *p) {
return AllocationBegin(p);
}
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index b0a54d7cd9bc5..471b134a26471 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -145,7 +145,7 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(AllocatorCache);
}
-void *GetMallocBegin(const void *p) {
+const void *GetMallocBegin(const void *p) {
if (!p)
return nullptr;
void *beg = allocator.GetBlockBegin(p);
@@ -158,7 +158,7 @@ void *GetMallocBegin(const void *p) {
return nullptr;
if (m->requested_size == 0)
return nullptr;
- return (void *)beg;
+ return (const void *)beg;
}
uptr GetMallocUsableSize(const void *p) {
@@ -380,7 +380,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_get_allocated_begin(const void *p) {
+const void * __sanitizer_get_allocated_begin(const void *p) {
return GetMallocBegin(p);
}
diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
index 80a87d49dfc6e..49c0aad39cfbd 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -681,7 +681,7 @@ int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
-void *memprof_malloc_begin(const void *p) {
+const void *memprof_malloc_begin(const void *p) {
u64 user_requested_size;
MemprofChunk *m =
instance.GetMemprofChunkByAddr((uptr)p, user_requested_size);
@@ -690,7 +690,7 @@ void *memprof_malloc_begin(const void *p) {
if (user_requested_size == 0)
return nullptr;
- return (void *)m->Beg();
+ return (const void *)m->Beg();
}
uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
@@ -711,7 +711,7 @@ int __sanitizer_get_ownership(const void *p) {
return memprof_malloc_usable_size(p, 0, 0) != 0;
}
-void *__sanitizer_get_allocated_begin(const void *p) {
+const void *__sanitizer_get_allocated_begin(const void *p) {
return memprof_malloc_begin(p);
}
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index 08ec3314b26e6..1013303af6795 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -260,7 +260,7 @@ static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
-void *AllocationBegin(const void *p) {
+const void *AllocationBegin(const void *p) {
if (!p)
return nullptr;
void *beg = allocator.GetBlockBegin(p);
@@ -272,7 +272,7 @@ void *AllocationBegin(const void *p) {
if (b->requested_size == 0)
return nullptr;
- return (void *)beg;
+ return (const void *)beg;
}
static uptr AllocationSize(const void *p) {
@@ -388,7 +388,7 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
-void *__sanitizer_get_allocated_begin(const void *p) {
+const void *__sanitizer_get_allocated_begin(const void *p) {
return AllocationBegin(p);
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
index 35c7c97df3299..504109e9d3f6f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -21,7 +21,7 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const void *
__sanitizer_get_allocated_begin(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 3cc4d16955ede..b548265fe6833 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -352,7 +352,7 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
-void *user_alloc_begin(const void *p) {
+const void *user_alloc_begin(const void *p) {
if (p == nullptr || !IsAppMem((uptr)p))
return nullptr;
void *beg = allocator()->GetBlockBegin(p);
@@ -363,7 +363,7 @@ void *user_alloc_begin(const void *p) {
if (!b)
return nullptr; // Not a valid pointer.
- return (void *)beg;
+ return (const void *)beg;
}
uptr user_alloc_usable_size(const void *p) {
@@ -444,7 +444,7 @@ int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
-void *__sanitizer_get_allocated_begin(const void *p) {
+const void *__sanitizer_get_allocated_begin(const void *p) {
return user_alloc_begin(p);
}
diff --git a/compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp b/compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp
index 6892a4a7fb282..1683063baea26 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp
+++ b/compiler-rt/test/sanitizer_common/TestCases/get_allocated_begin.cpp
@@ -23,7 +23,7 @@ int main(void) {
// Bogus value to unpoison start. Calling __sanitizer_get_allocated_begin
// does not unpoison it.
- void *start = NULL;
+ const void *start = NULL;
for (int j = 0; j < sizes[i]; j++) {
printf("j: %d\n", j);


@@ -1,162 +0,0 @@
From b1bd52cd0d8627df1187448b8247a9c7a4675019 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston@google.com>
Date: Wed, 12 Apr 2023 20:53:49 +0000
Subject: [PATCH] Fix tls_get_addr handling for glibc >=2.25
This changes the sanitizers' tls_get_addr handling from
a heuristic check for __signal_safe_memalign allocations
(which was only used in a since-deprecated version of
Google's runtime) to using the sanitizers' interface
function to check whether the address is a malloc
allocation (the mechanism used since glibc >= 2.25).
This is one of the approaches proposed by Keno in
https://github.com/google/sanitizers/issues/1409#issuecomment-1214244142
This moves the weak annotation of __sanitizer_get_allocated_size/begin from the header to sanitizer_tls_get_addr.cpp, as suggested by Vitaly in D148060.
Reviewed By: vitalybuka
Differential Revision: https://reviews.llvm.org/D147459
---
.../sanitizer_allocator_interface.h | 4 +--
.../sanitizer_tls_get_addr.cpp | 29 ++++++++++---------
.../sanitizer_common/sanitizer_tls_get_addr.h | 26 +++++++++++------
compiler-rt/test/msan/dtls_test.c | 4 ---
4 files changed, 34 insertions(+), 29 deletions(-)
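
For illustration, a minimal sketch (not part of the patch) of the new detection path. Here uptr stands in for the sanitizer-internal typedef, and GuessMallocedTls is a hypothetical helper name; the two interface functions are the ones the patch declares as weak symbols:

#include <cstdint>

using uptr = uintptr_t;  // stand-in for the sanitizer-internal typedef

// Assumed prototypes; the patch declares these as weak symbols in
// sanitizer_tls_get_addr.cpp.
extern "C" const void *__sanitizer_get_allocated_begin(const void *p);
extern "C" uptr __sanitizer_get_allocated_size(const void *p);

// On glibc >= 2.25, DTLS chunks are plain malloc allocations, so our own
// allocator can report their bounds.
static bool GuessMallocedTls(uptr tls_beg, uptr *beg, uptr *size) {
  if (const void *start = __sanitizer_get_allocated_begin((void *)tls_beg)) {
    *beg = (uptr)start;                             // chunk start
    *size = __sanitizer_get_allocated_size(start);  // chunk size
    return true;
  }
  return false;  // not one of our allocations; fall back to other heuristics
}
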
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
index 504109e9d3f6f..8f3b71eb6ce74 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -21,8 +21,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const void *
-__sanitizer_get_allocated_begin(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
+ const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
index b13e2dc9e3327..252979f1c2baa 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
@@ -12,6 +12,7 @@
#include "sanitizer_tls_get_addr.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_interceptors.h"
@@ -26,13 +27,6 @@ struct TlsGetAddrParam {
uptr offset;
};
-// Glibc starting from 2.19 allocates tls using __signal_safe_memalign,
-// which has such header.
-struct Glibc_2_19_tls_header {
- uptr size;
- uptr start;
-};
-
// This must be static TLS
__attribute__((tls_model("initial-exec")))
static __thread DTLS dtls;
@@ -108,6 +102,14 @@ static const uptr kDtvOffset = 0x800;
static const uptr kDtvOffset = 0;
#endif
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p);
+
+SANITIZER_WEAK_ATTRIBUTE
+const void *__sanitizer_get_allocated_begin(const void *p);
+}
+
DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
uptr static_tls_begin, uptr static_tls_end) {
if (!common_flags()->intercept_tls_get_addr) return 0;
@@ -125,19 +127,18 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
atomic_load(&number_of_live_dtls, memory_order_relaxed));
if (dtls.last_memalign_ptr == tls_beg) {
tls_size = dtls.last_memalign_size;
- VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={0x%zx,0x%zx}\n",
+ VReport(2, "__tls_get_addr: glibc <=2.24 suspected; tls={0x%zx,0x%zx}\n",
tls_beg, tls_size);
} else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
// This is the static TLS block which was initialized / unpoisoned at thread
// creation.
VReport(2, "__tls_get_addr: static tls: 0x%zx\n", tls_beg);
tls_size = 0;
- } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
- // We may want to check gnu_get_libc_version().
- Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
- tls_size = header->size;
- tls_beg = header->start;
- VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={0x%zx 0x%zx}\n",
+ } else if (const void *start =
+ __sanitizer_get_allocated_begin((void *)tls_beg)) {
+ tls_beg = (uptr)start;
+ tls_size = __sanitizer_get_allocated_size(start);
+ VReport(2, "__tls_get_addr: glibc >=2.25 suspected; tls={0x%zx,0x%zx}\n",
tls_beg, tls_size);
} else {
VReport(2, "__tls_get_addr: Can't guess glibc version\n");
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
index a599c0bbc75cc..0ddab61deb102 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
@@ -12,16 +12,24 @@
// the lack of interface that would tell us about the Dynamic TLS (DTLS).
// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
//
-// The matters get worse because the glibc implementation changed between
-// 2.18 and 2.19:
-// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
-//
-// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// Before 2.25: every DTLS chunk is allocated with __libc_memalign,
// which we intercept and thus know where is the DTLS.
-// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
-// which is an internal function that wraps a mmap call, neither of which
-// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
-// header which we can use.
+//
+// Since 2.25: DTLS chunks are allocated with malloc. We could co-opt
+// the malloc interceptor to keep track of the last allocation, similar
+// to how we handle __libc_memalign; however, this adds some overhead
+// (since malloc, unlike __libc_memalign, is commonly called), and
+// requires care to avoid false negatives for LeakSanitizer.
+// Instead, we rely on our internal allocators - which keep track of all
+// their allocations - to determine if an address points to a malloc
+// allocation.
+//
+// There exists a since-deprecated version of Google's internal glibc fork
+// that used __signal_safe_memalign. DTLS_on_tls_get_addr relied on a
+// heuristic check (is the allocation 16 bytes from the start of a page
+// boundary?), which was sometimes erroneous:
+// https://bugs.chromium.org/p/chromium/issues/detail?id=1275223#c15
+// Since that check has no practical use anymore, we have removed it.
//
//===----------------------------------------------------------------------===//
diff --git a/compiler-rt/test/msan/dtls_test.c b/compiler-rt/test/msan/dtls_test.c
index 45c8fd38bf5f6..3c384256147a0 100644
--- a/compiler-rt/test/msan/dtls_test.c
+++ b/compiler-rt/test/msan/dtls_test.c
@@ -12,10 +12,6 @@
// Reports use-of-uninitialized-value, not analyzed
XFAIL: target={{.*netbsd.*}}
- // This is known to be broken with glibc-2.27+
- // https://bugs.llvm.org/show_bug.cgi?id=37804
- XFAIL: glibc-2.27
-
*/
#ifndef BUILD_SO


@@ -1,842 +0,0 @@
From 4cb60673a0a25a25d171716c5b90e7a3368d434f Mon Sep 17 00:00:00 2001
From: Alexey Lapshin <a.v.lapshin@mail.ru>
Date: Mon, 30 Jan 2023 15:05:53 +0100
Subject: [PATCH] [dsymutil] dsymutil produces broken lines info (probably)
with LTO on mac
This patch fixes issue #60307. Commit 8bb4451 introduced the uniting of
overlapping or adjacent address ranges to keep address ranges in an
unambiguous state. The AddressRangesMap is used to normalize address
ranges. The AddressRangesMap keeps address ranges and the value of the
relocated address. For intersecting ranges, it creates a united range
that keeps the last inserted mapping value; the same happens for
adjacent ranges. While it is OK to use the last inserted mapping value
for intersecting ranges (as there is no way to resolve the ambiguity),
it is not OK to use the last inserted value for adjacent address ranges.
Currently, the two following address ranges are united into a single one:
{0,24,17e685c} {24,d8,55afe20} -> {0,d8,55afe20}
To avoid the problem, the AddressRangesMap should not unite adjacent
address ranges with different relocated addresses. Instead, it should
leave adjacent address ranges as separate ranges. So, the ranges should
look like this:
{0,24,17e685c} {24,d8,55afe20}
Differential Revision: https://reviews.llvm.org/D142936
---
llvm/include/llvm/ADT/AddressRanges.h | 206 ++++++++-----
.../llvm/DWARFLinker/DWARFLinkerCompileUnit.h | 2 +-
llvm/lib/DWARFLinker/DWARFLinker.cpp | 36 +--
llvm/lib/DWARFLinker/DWARFStreamer.cpp | 5 +-
llvm/lib/Support/AddressRanges.cpp | 70 -----
llvm/lib/Support/CMakeLists.txt | 1 -
llvm/unittests/Support/AddressRangeTest.cpp | 285 +++++++++++++++---
7 files changed, 398 insertions(+), 207 deletions(-)
delete mode 100644 llvm/lib/Support/AddressRanges.cpp
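
As a sketch of the intended behavior (written against the patched header, not part of the patch), adjacent insertions carrying different relocation deltas now stay separate:

#include "llvm/ADT/AddressRanges.h"
#include <cassert>

void Example() {
  llvm::AddressRangesMap Map;
  Map.insert(llvm::AddressRange(0x00, 0x24), 0x17e685c);  // first function
  Map.insert(llvm::AddressRange(0x24, 0xd8), 0x55afe20);  // adjacent function

  // Before the fix these were merged into {0x0,0xd8} -> 0x55afe20, losing
  // the first delta; now both mappings survive.
  assert(Map.size() == 2);
  assert(Map.getRangeThatContains(0x10)->Value == 0x17e685c);
  assert(Map.getRangeThatContains(0x30)->Value == 0x55afe20);
}
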
diff --git a/llvm/include/llvm/ADT/AddressRanges.h b/llvm/include/llvm/ADT/AddressRanges.h
index f2052d82e7c1..415d30bbb5cf 100644
--- a/llvm/include/llvm/ADT/AddressRanges.h
+++ b/llvm/include/llvm/ADT/AddressRanges.h
@@ -28,7 +28,11 @@ public:
uint64_t start() const { return Start; }
uint64_t end() const { return End; }
uint64_t size() const { return End - Start; }
+ bool empty() const { return size() == 0; }
bool contains(uint64_t Addr) const { return Start <= Addr && Addr < End; }
+ bool contains(const AddressRange &R) const {
+ return Start <= R.Start && R.End <= End;
+ }
bool intersects(const AddressRange &R) const {
return Start < R.End && R.Start < End;
}
@@ -45,101 +49,163 @@ private:
uint64_t End = 0;
};
-/// The AddressRanges class helps normalize address range collections.
-/// This class keeps a sorted vector of AddressRange objects and can perform
-/// insertions and searches efficiently. The address ranges are always sorted
-/// and never contain any invalid or empty address ranges.
-/// Intersecting([100,200), [150,300)) and adjacent([100,200), [200,300))
-/// address ranges are combined during insertion.
-class AddressRanges {
+/// The AddressRangesBase class provides the base functionality for a
+/// normalized address range collection. It keeps a sorted vector of
+/// AddressRange-like objects and can perform searches efficiently.
+/// The address ranges are always sorted and never contain any invalid,
+/// empty or intersecting address ranges.
+
+template <typename T> class AddressRangesBase {
protected:
- using Collection = SmallVector<AddressRange>;
+ using Collection = SmallVector<T>;
Collection Ranges;
public:
void clear() { Ranges.clear(); }
bool empty() const { return Ranges.empty(); }
- bool contains(uint64_t Addr) const { return find(Addr) != Ranges.end(); }
+ bool contains(uint64_t Addr) const {
+ return find(Addr, Addr + 1) != Ranges.end();
+ }
bool contains(AddressRange Range) const {
- return find(Range) != Ranges.end();
+ return find(Range.start(), Range.end()) != Ranges.end();
}
- std::optional<AddressRange> getRangeThatContains(uint64_t Addr) const {
- Collection::const_iterator It = find(Addr);
+ void reserve(size_t Capacity) { Ranges.reserve(Capacity); }
+ size_t size() const { return Ranges.size(); }
+
+ std::optional<T> getRangeThatContains(uint64_t Addr) const {
+ typename Collection::const_iterator It = find(Addr, Addr + 1);
if (It == Ranges.end())
return std::nullopt;
return *It;
}
- Collection::const_iterator insert(AddressRange Range);
- void reserve(size_t Capacity) { Ranges.reserve(Capacity); }
- size_t size() const { return Ranges.size(); }
- bool operator==(const AddressRanges &RHS) const {
- return Ranges == RHS.Ranges;
- }
- const AddressRange &operator[](size_t i) const {
+
+ typename Collection::const_iterator begin() const { return Ranges.begin(); }
+ typename Collection::const_iterator end() const { return Ranges.end(); }
+
+ const T &operator[](size_t i) const {
assert(i < Ranges.size());
return Ranges[i];
}
- Collection::const_iterator begin() const { return Ranges.begin(); }
- Collection::const_iterator end() const { return Ranges.end(); }
+
+ bool operator==(const AddressRangesBase<T> &RHS) const {
+ return Ranges == RHS.Ranges;
+ }
protected:
- Collection::const_iterator find(uint64_t Addr) const;
- Collection::const_iterator find(AddressRange Range) const;
+ typename Collection::const_iterator find(uint64_t Start, uint64_t End) const {
+ if (Start >= End)
+ return Ranges.end();
+
+ auto It =
+ std::partition_point(Ranges.begin(), Ranges.end(), [=](const T &R) {
+ return AddressRange(R).start() <= Start;
+ });
+
+ if (It == Ranges.begin())
+ return Ranges.end();
+
+ --It;
+ if (End > AddressRange(*It).end())
+ return Ranges.end();
+
+ return It;
+ }
};
-/// AddressRangesMap class maps values to the address ranges.
-/// It keeps address ranges and corresponding values. If ranges
-/// are combined during insertion, then combined range keeps
-/// newly inserted value.
-template <typename T> class AddressRangesMap : protected AddressRanges {
+/// The AddressRanges class helps normalize address range collections.
+/// This class keeps a sorted vector of AddressRange objects and can perform
+/// insertions and searches efficiently. Intersecting([100,200), [150,300))
+/// and adjacent([100,200), [200,300)) address ranges are combined during
+/// insertion.
+class AddressRanges : public AddressRangesBase<AddressRange> {
public:
- void clear() {
- Ranges.clear();
- Values.clear();
+ Collection::const_iterator insert(AddressRange Range) {
+ if (Range.empty())
+ return Ranges.end();
+
+ auto It = llvm::upper_bound(Ranges, Range);
+ auto It2 = It;
+ while (It2 != Ranges.end() && It2->start() <= Range.end())
+ ++It2;
+ if (It != It2) {
+ Range = {Range.start(), std::max(Range.end(), std::prev(It2)->end())};
+ It = Ranges.erase(It, It2);
+ }
+ if (It != Ranges.begin() && Range.start() <= std::prev(It)->end()) {
+ --It;
+ *It = {It->start(), std::max(It->end(), Range.end())};
+ return It;
+ }
+
+ return Ranges.insert(It, Range);
}
- bool empty() const { return AddressRanges::empty(); }
- bool contains(uint64_t Addr) const { return AddressRanges::contains(Addr); }
- bool contains(AddressRange Range) const {
- return AddressRanges::contains(Range);
- }
- void insert(AddressRange Range, T Value) {
- size_t InputSize = Ranges.size();
- Collection::const_iterator RangesIt = AddressRanges::insert(Range);
- if (RangesIt == Ranges.end())
- return;
+};
- // make Values match to Ranges.
- size_t Idx = RangesIt - Ranges.begin();
- typename ValuesCollection::iterator ValuesIt = Values.begin() + Idx;
- if (InputSize < Ranges.size())
- Values.insert(ValuesIt, T());
- else if (InputSize > Ranges.size())
- Values.erase(ValuesIt, ValuesIt + InputSize - Ranges.size());
- assert(Ranges.size() == Values.size());
-
- // set value to the inserted or combined range.
- Values[Idx] = Value;
- }
- size_t size() const {
- assert(Ranges.size() == Values.size());
- return AddressRanges::size();
- }
- std::optional<std::pair<AddressRange, T>>
- getRangeValueThatContains(uint64_t Addr) const {
- Collection::const_iterator It = find(Addr);
- if (It == Ranges.end())
- return std::nullopt;
+class AddressRangeValuePair {
+public:
+ operator AddressRange() const { return Range; }
- return std::make_pair(*It, Values[It - Ranges.begin()]);
- }
- std::pair<AddressRange, T> operator[](size_t Idx) const {
- return std::make_pair(Ranges[Idx], Values[Idx]);
- }
+ AddressRange Range;
+ int64_t Value = 0;
+};
-protected:
- using ValuesCollection = SmallVector<T>;
- ValuesCollection Values;
+inline bool operator==(const AddressRangeValuePair &LHS,
+ const AddressRangeValuePair &RHS) {
+ return LHS.Range == RHS.Range && LHS.Value == RHS.Value;
+}
+
+/// AddressRangesMap class maps values to the address ranges.
+/// It keeps normalized address ranges and corresponding values.
+/// This class keeps a sorted vector of AddressRangeValuePair objects
+/// and can perform insertions and searches efficiently.
+/// Intersecting([100,200), [150,300)) ranges are split into non-conflicting
+/// parts ([100,200), [200,300)). Adjacent([100,200), [200,300)) address
+/// ranges are not combined during insertion.
+class AddressRangesMap : public AddressRangesBase<AddressRangeValuePair> {
+public:
+ void insert(AddressRange Range, int64_t Value) {
+ if (Range.empty())
+ return;
+
+ // Search for the range whose start is less than or equal to the incoming Range's start.
+ auto It = std::partition_point(Ranges.begin(), Ranges.end(),
+ [=](const AddressRangeValuePair &R) {
+ return R.Range.start() <= Range.start();
+ });
+
+ if (It != Ranges.begin())
+ It--;
+
+ while (!Range.empty()) {
+ // Inserted range does not overlap with any range.
+ // Store it into the Ranges collection.
+ if (It == Ranges.end() || Range.end() <= It->Range.start()) {
+ Ranges.insert(It, {Range, Value});
+ return;
+ }
+
+ // Inserted range partially overlaps with current range.
+ // Store the non-overlapping part of the inserted range.
+ if (Range.start() < It->Range.start()) {
+ It = Ranges.insert(It, {{Range.start(), It->Range.start()}, Value});
+ It++;
+ Range = {It->Range.start(), Range.end()};
+ continue;
+ }
+
+ // Inserted range fully overlaps with current range.
+ if (Range.end() <= It->Range.end())
+ return;
+
+ // Inserted range partially overlaps with current range.
+ // Remove the overlapping part from the inserted range.
+ if (Range.start() < It->Range.end())
+ Range = {It->Range.end(), Range.end()};
+
+ It++;
+ }
+ }
};
} // namespace llvm
diff --git a/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h b/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h
index 5b0ea339c4d6..9c7f24e69d48 100644
--- a/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h
+++ b/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h
@@ -21,7 +21,7 @@ class DeclContext;
/// Mapped value in the address map is the offset to apply to the
/// linked address.
-using RangesTy = AddressRangesMap<int64_t>;
+using RangesTy = AddressRangesMap;
// FIXME: Delete this structure.
struct PatchLocation {
diff --git a/llvm/lib/DWARFLinker/DWARFLinker.cpp b/llvm/lib/DWARFLinker/DWARFLinker.cpp
index 9f6e54377ede..d302d61894fa 100644
--- a/llvm/lib/DWARFLinker/DWARFLinker.cpp
+++ b/llvm/lib/DWARFLinker/DWARFLinker.cpp
@@ -1659,7 +1659,7 @@ void DWARFLinker::patchRangesForUnit(const CompileUnit &Unit,
DWARFDataExtractor RangeExtractor(OrigDwarf.getDWARFObj(),
OrigDwarf.getDWARFObj().getRangesSection(),
OrigDwarf.isLittleEndian(), AddressSize);
- std::optional<std::pair<AddressRange, int64_t>> CachedRange;
+ std::optional<AddressRangeValuePair> CachedRange;
DWARFUnit &OrigUnit = Unit.getOrigUnit();
auto OrigUnitDie = OrigUnit.getUnitDIE(false);
uint64_t UnitBaseAddress =
@@ -1687,9 +1687,9 @@ void DWARFLinker::patchRangesForUnit(const CompileUnit &Unit,
}
if (!CachedRange ||
- !CachedRange->first.contains(Range.StartAddress + BaseAddress))
- CachedRange = FunctionRanges.getRangeValueThatContains(
- Range.StartAddress + BaseAddress);
+ !CachedRange->Range.contains(Range.StartAddress + BaseAddress))
+ CachedRange = FunctionRanges.getRangeThatContains(Range.StartAddress +
+ BaseAddress);
// All range entries should lie in the function range.
if (!CachedRange) {
@@ -1698,8 +1698,8 @@ void DWARFLinker::patchRangesForUnit(const CompileUnit &Unit,
}
LinkedRanges.insert(
- {Range.StartAddress + BaseAddress + CachedRange->second,
- Range.EndAddress + BaseAddress + CachedRange->second});
+ {Range.StartAddress + BaseAddress + CachedRange->Value,
+ Range.EndAddress + BaseAddress + CachedRange->Value});
}
}
@@ -1802,7 +1802,7 @@ void DWARFLinker::patchLineTableForUnit(CompileUnit &Unit,
// in NewRows.
std::vector<DWARFDebugLine::Row> Seq;
const auto &FunctionRanges = Unit.getFunctionRanges();
- std::optional<std::pair<AddressRange, int64_t>> CurrRange;
+ std::optional<AddressRangeValuePair> CurrRange;
// FIXME: This logic is meant to generate exactly the same output as
// Darwin's classic dsymutil. There is a nicer way to implement this
@@ -1821,13 +1821,13 @@ void DWARFLinker::patchLineTableForUnit(CompileUnit &Unit,
// it is marked as end_sequence in the input (because in that
// case, the relocation offset is accurate and that entry won't
// serve as the start of another function).
- if (!CurrRange || !CurrRange->first.contains(Row.Address.Address) ||
- (Row.Address.Address == CurrRange->first.end() && !Row.EndSequence)) {
+ if (!CurrRange || !CurrRange->Range.contains(Row.Address.Address) ||
+ (Row.Address.Address == CurrRange->Range.end() && !Row.EndSequence)) {
// We just stepped out of a known range. Insert a end_sequence
// corresponding to the end of the range.
uint64_t StopAddress =
- CurrRange ? CurrRange->first.end() + CurrRange->second : -1ULL;
- CurrRange = FunctionRanges.getRangeValueThatContains(Row.Address.Address);
+ CurrRange ? CurrRange->Range.end() + CurrRange->Value : -1ULL;
+ CurrRange = FunctionRanges.getRangeThatContains(Row.Address.Address);
if (!CurrRange) {
if (StopAddress != -1ULL) {
// Try harder by looking in the Address ranges map.
@@ -1836,9 +1836,9 @@ void DWARFLinker::patchLineTableForUnit(CompileUnit &Unit,
// for now do as dsymutil.
// FIXME: Understand exactly what cases this addresses and
// potentially remove it along with the Ranges map.
- if (std::optional<std::pair<AddressRange, int64_t>> Range =
- Ranges.getRangeValueThatContains(Row.Address.Address))
- StopAddress = Row.Address.Address + (*Range).second;
+ if (std::optional<AddressRangeValuePair> Range =
+ Ranges.getRangeThatContains(Row.Address.Address))
+ StopAddress = Row.Address.Address + (*Range).Value;
}
}
if (StopAddress != -1ULL && !Seq.empty()) {
@@ -1863,7 +1863,7 @@ void DWARFLinker::patchLineTableForUnit(CompileUnit &Unit,
continue;
// Relocate row address and add it to the current sequence.
- Row.Address.Address += CurrRange->second;
+ Row.Address.Address += CurrRange->Value;
Seq.emplace_back(Row);
if (Row.EndSequence)
@@ -2002,8 +2002,8 @@ void DWARFLinker::patchFrameInfoForObject(const DWARFFile &File,
// the function entry point, thus we can't just lookup the address
// in the debug map. Use the AddressInfo's range map to see if the FDE
// describes something that we can relocate.
- std::optional<std::pair<AddressRange, int64_t>> Range =
- Ranges.getRangeValueThatContains(Loc);
+ std::optional<AddressRangeValuePair> Range =
+ Ranges.getRangeThatContains(Loc);
if (!Range) {
// The +4 is to account for the size of the InitialLength field itself.
InputOffset = EntryOffset + InitialLength + 4;
@@ -2032,7 +2032,7 @@ void DWARFLinker::patchFrameInfoForObject(const DWARFFile &File,
// fields that will get reconstructed by emitFDE().
unsigned FDERemainingBytes = InitialLength - (4 + AddrSize);
TheDwarfEmitter->emitFDE(IteratorInserted.first->getValue(), AddrSize,
- Loc + Range->second,
+ Loc + Range->Value,
FrameData.substr(InputOffset, FDERemainingBytes));
InputOffset += FDERemainingBytes;
}
diff --git a/llvm/lib/DWARFLinker/DWARFStreamer.cpp b/llvm/lib/DWARFLinker/DWARFStreamer.cpp
index 5cad267fd845..ae79e8cb9066 100644
--- a/llvm/lib/DWARFLinker/DWARFStreamer.cpp
+++ b/llvm/lib/DWARFLinker/DWARFStreamer.cpp
@@ -402,10 +402,9 @@ void DwarfStreamer::emitUnitRangesEntries(CompileUnit &Unit,
// Linked addresses might end up in a different order.
// Build linked address ranges.
AddressRanges LinkedRanges;
- for (size_t Idx = 0; Idx < FunctionRanges.size(); Idx++)
+ for (const AddressRangeValuePair &Range : FunctionRanges)
LinkedRanges.insert(
- {FunctionRanges[Idx].first.start() + FunctionRanges[Idx].second,
- FunctionRanges[Idx].first.end() + FunctionRanges[Idx].second});
+ {Range.Range.start() + Range.Value, Range.Range.end() + Range.Value});
if (!FunctionRanges.empty())
emitDwarfDebugArangesTable(Unit, LinkedRanges);
diff --git a/llvm/lib/Support/AddressRanges.cpp b/llvm/lib/Support/AddressRanges.cpp
deleted file mode 100644
index 187d5be00dae..000000000000
--- a/llvm/lib/Support/AddressRanges.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-//===- AddressRanges.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/AddressRanges.h"
-#include "llvm/ADT/STLExtras.h"
-#include <inttypes.h>
-
-using namespace llvm;
-
-AddressRanges::Collection::const_iterator
-AddressRanges::insert(AddressRange Range) {
- if (Range.size() == 0)
- return Ranges.end();
-
- auto It = llvm::upper_bound(Ranges, Range);
- auto It2 = It;
- while (It2 != Ranges.end() && It2->start() <= Range.end())
- ++It2;
- if (It != It2) {
- Range = {Range.start(), std::max(Range.end(), std::prev(It2)->end())};
- It = Ranges.erase(It, It2);
- }
- if (It != Ranges.begin() && Range.start() <= std::prev(It)->end()) {
- --It;
- *It = {It->start(), std::max(It->end(), Range.end())};
- return It;
- }
-
- return Ranges.insert(It, Range);
-}
-
-AddressRanges::Collection::const_iterator
-AddressRanges::find(uint64_t Addr) const {
- auto It = std::partition_point(
- Ranges.begin(), Ranges.end(),
- [=](const AddressRange &R) { return R.start() <= Addr; });
-
- if (It == Ranges.begin())
- return Ranges.end();
-
- --It;
- if (Addr >= It->end())
- return Ranges.end();
-
- return It;
-}
-
-AddressRanges::Collection::const_iterator
-AddressRanges::find(AddressRange Range) const {
- if (Range.size() == 0)
- return Ranges.end();
-
- auto It = std::partition_point(
- Ranges.begin(), Ranges.end(),
- [=](const AddressRange &R) { return R.start() <= Range.start(); });
-
- if (It == Ranges.begin())
- return Ranges.end();
-
- --It;
- if (Range.end() > It->end())
- return Ranges.end();
-
- return It;
-}
diff --git a/llvm/lib/Support/CMakeLists.txt b/llvm/lib/Support/CMakeLists.txt
index 4cbc3b79f3bb..8fbb2ca4c164 100644
--- a/llvm/lib/Support/CMakeLists.txt
+++ b/llvm/lib/Support/CMakeLists.txt
@@ -117,7 +117,6 @@ endif()
add_subdirectory(BLAKE3)
add_llvm_component_library(LLVMSupport
- AddressRanges.cpp
ABIBreak.cpp
AMDGPUMetadata.cpp
APFixedPoint.cpp
diff --git a/llvm/unittests/Support/AddressRangeTest.cpp b/llvm/unittests/Support/AddressRangeTest.cpp
index 468f1e22ffa8..06b326678402 100644
--- a/llvm/unittests/Support/AddressRangeTest.cpp
+++ b/llvm/unittests/Support/AddressRangeTest.cpp
@@ -149,8 +149,31 @@ TEST(AddressRangeTest, TestRanges) {
EXPECT_EQ(Ranges[0], AddressRange(0x1000, 0x5000));
}
+TEST(AddressRangeTest, TestRangesRandom) {
+ AddressRanges Ranges;
+ size_t NumElements = 100;
+
+ std::srand(std::time(nullptr));
+
+ // Fill ranges.
+ for (size_t Idx = 0; Idx < NumElements; Idx++) {
+ uint64_t Start = static_cast<uint64_t>(std::rand() % 1000);
+ uint64_t End = Start + static_cast<uint64_t>(std::rand() % 1000);
+ Ranges.insert({Start, End});
+ }
+
+ // Check ranges.
+ for (size_t Idx = 0; Idx + 1 < Ranges.size(); Idx++) {
+ // Check that ranges do not intersect.
+ EXPECT_FALSE(Ranges[Idx].intersects(Ranges[Idx + 1]));
+
+ // Check that ranges are sorted and not adjacent.
+ EXPECT_TRUE(Ranges[Idx].end() < Ranges[Idx + 1].start());
+ }
+}
+
TEST(AddressRangeTest, TestRangesMap) {
- AddressRangesMap<int> Ranges;
+ AddressRangesMap Ranges;
EXPECT_EQ(Ranges.size(), 0u);
EXPECT_TRUE(Ranges.empty());
@@ -162,73 +185,247 @@ TEST(AddressRangeTest, TestRangesMap) {
EXPECT_TRUE(Ranges.contains(0x1500));
EXPECT_TRUE(Ranges.contains(AddressRange(0x1000, 0x2000)));
+ ///////////////////////////////////////
+ /// Check ranges with the same mapped value.
+
+ // Clear ranges.
+ Ranges.clear();
+ EXPECT_EQ(Ranges.size(), 0u);
+ EXPECT_TRUE(Ranges.empty());
+
+ // Add range and check mapped value.
+ Ranges.insert(AddressRange(0x1000, 0x2000), 0x11);
+ EXPECT_EQ(Ranges.size(), 1u);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0x11);
+
+ // Add adjacent range and check mapped value.
+ Ranges.insert(AddressRange(0x2000, 0x3000), 0x11);
+ EXPECT_EQ(Ranges.size(), 2u);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0x11);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x2000)->Value, 0x11);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x2900)->Value, 0x11);
+ EXPECT_FALSE(Ranges.getRangeThatContains(0x3000));
+
+ // Add intersecting range and check mapped value.
+ Ranges.insert(AddressRange(0x1000, 0x3000), 0x11);
+ EXPECT_EQ(Ranges.size(), 2u);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0x11);
+
+ // Add second range and check mapped values.
+ Ranges.insert(AddressRange(0x4000, 0x5000), 0x11);
+ EXPECT_EQ(Ranges.size(), 3u);
+ EXPECT_EQ(Ranges[0].Range, AddressRange(0x1000, 0x2000));
+ EXPECT_EQ(Ranges[0].Value, 0x11);
+ EXPECT_EQ(Ranges[1].Range, AddressRange(0x2000, 0x3000));
+ EXPECT_EQ(Ranges[1].Value, 0x11);
+ EXPECT_EQ(Ranges[2].Range, AddressRange(0x4000, 0x5000));
+ EXPECT_EQ(Ranges[2].Value, 0x11);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0x11);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x4000)->Value, 0x11);
+
+ // Add intersecting range and check mapped value.
+ Ranges.insert(AddressRange(0x0, 0x6000), 0x11);
+ EXPECT_EQ(Ranges.size(), 6u);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0x11);
+
+ // Check that mapped values are correctly preserved for combined ranges.
+ Ranges.clear();
+ Ranges.insert(AddressRange(0x0, 0xff), 0x11);
+ Ranges.insert(AddressRange(0x100, 0x1ff), 0x11);
+ Ranges.insert(AddressRange(0x200, 0x2ff), 0x11);
+ Ranges.insert(AddressRange(0x500, 0x5ff), 0x11);
+ Ranges.insert(AddressRange(0x300, 0x3ff), 0x11);
+ Ranges.insert(AddressRange(0x400, 0x4ff), 0x11);
+ Ranges.insert(AddressRange(0x600, 0x6ff), 0x11);
+ EXPECT_EQ(Ranges.size(), 7u);
+
+ Ranges.insert(AddressRange(0x150, 0x350), 0x11);
+ EXPECT_EQ(Ranges.size(), 9u);
+ EXPECT_EQ(Ranges[0].Range, AddressRange(0x0, 0xff));
+ EXPECT_EQ(Ranges[0].Value, 0x11);
+ EXPECT_EQ(Ranges[1].Range, AddressRange(0x100, 0x1ff));
+ EXPECT_EQ(Ranges[1].Value, 0x11);
+ EXPECT_EQ(Ranges[2].Range, AddressRange(0x1ff, 0x200));
+ EXPECT_EQ(Ranges[2].Value, 0x11);
+ EXPECT_EQ(Ranges[3].Range, AddressRange(0x200, 0x2ff));
+ EXPECT_EQ(Ranges[3].Value, 0x11);
+ EXPECT_EQ(Ranges[4].Range, AddressRange(0x2ff, 0x300));
+ EXPECT_EQ(Ranges[4].Value, 0x11);
+ EXPECT_EQ(Ranges[5].Range, AddressRange(0x300, 0x3ff));
+ EXPECT_EQ(Ranges[5].Value, 0x11);
+ EXPECT_EQ(Ranges[6].Range, AddressRange(0x400, 0x4ff));
+ EXPECT_EQ(Ranges[6].Value, 0x11);
+ EXPECT_EQ(Ranges[7].Range, AddressRange(0x500, 0x5ff));
+ EXPECT_EQ(Ranges[7].Value, 0x11);
+ EXPECT_EQ(Ranges[8].Range, AddressRange(0x600, 0x6ff));
+ EXPECT_EQ(Ranges[8].Value, 0x11);
+
+ Ranges.insert(AddressRange(0x3ff, 0x400), 0x11);
+ EXPECT_EQ(Ranges.size(), 10u);
+ EXPECT_EQ(Ranges[0].Range, AddressRange(0x0, 0xff));
+ EXPECT_EQ(Ranges[0].Value, 0x11);
+ EXPECT_EQ(Ranges[1].Range, AddressRange(0x100, 0x1ff));
+ EXPECT_EQ(Ranges[1].Value, 0x11);
+ EXPECT_EQ(Ranges[2].Range, AddressRange(0x1ff, 0x200));
+ EXPECT_EQ(Ranges[2].Value, 0x11);
+ EXPECT_EQ(Ranges[3].Range, AddressRange(0x200, 0x2ff));
+ EXPECT_EQ(Ranges[3].Value, 0x11);
+ EXPECT_EQ(Ranges[4].Range, AddressRange(0x2ff, 0x300));
+ EXPECT_EQ(Ranges[4].Value, 0x11);
+ EXPECT_EQ(Ranges[5].Range, AddressRange(0x300, 0x3ff));
+ EXPECT_EQ(Ranges[5].Value, 0x11);
+ EXPECT_EQ(Ranges[6].Range, AddressRange(0x3ff, 0x400));
+ EXPECT_EQ(Ranges[6].Value, 0x11);
+ EXPECT_EQ(Ranges[7].Range, AddressRange(0x400, 0x4ff));
+ EXPECT_EQ(Ranges[7].Value, 0x11);
+ EXPECT_EQ(Ranges[8].Range, AddressRange(0x500, 0x5ff));
+ EXPECT_EQ(Ranges[8].Value, 0x11);
+ EXPECT_EQ(Ranges[9].Range, AddressRange(0x600, 0x6ff));
+ EXPECT_EQ(Ranges[9].Value, 0x11);
+
+ /////////////////////////////////////////////
+ /// Check ranges with various mapped values.
+
// Clear ranges.
Ranges.clear();
EXPECT_EQ(Ranges.size(), 0u);
EXPECT_TRUE(Ranges.empty());
- // Add range and check value.
+ // Add range and check mapped value.
Ranges.insert(AddressRange(0x1000, 0x2000), 0xfe);
EXPECT_EQ(Ranges.size(), 1u);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x1000)->second, 0xfe);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0xfe);
- // Add adjacent range and check value.
+ // Add adjacent range and check mapped value.
Ranges.insert(AddressRange(0x2000, 0x3000), 0xfc);
- EXPECT_EQ(Ranges.size(), 1u);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x1000)->second, 0xfc);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x2000)->second, 0xfc);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x2900)->second, 0xfc);
- EXPECT_FALSE(Ranges.getRangeValueThatContains(0x3000));
+ EXPECT_EQ(Ranges.size(), 2u);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0xfe);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x2000)->Value, 0xfc);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x2900)->Value, 0xfc);
+ EXPECT_FALSE(Ranges.getRangeThatContains(0x3000));
- // Add intersecting range and check value.
- Ranges.insert(AddressRange(0x2000, 0x3000), 0xff);
- EXPECT_EQ(Ranges.size(), 1u);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x1000)->second, 0xff);
+ // Add intersecting range and check mapped value.
+ Ranges.insert(AddressRange(0x1000, 0x3000), 0xff);
+ EXPECT_EQ(Ranges.size(), 2u);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0xfe);
- // Add second range and check values.
+ // Add one more range and check mapped values.
Ranges.insert(AddressRange(0x4000, 0x5000), 0x0);
- EXPECT_EQ(Ranges.size(), 2u);
- EXPECT_EQ(Ranges[0].second, 0xff);
- EXPECT_EQ(Ranges[1].second, 0x0);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x1000)->second, 0xff);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x4000)->second, 0x0);
+ EXPECT_EQ(Ranges.size(), 3u);
+ EXPECT_EQ(Ranges[0].Value, 0xfe);
+ EXPECT_EQ(Ranges[1].Value, 0xfc);
+ EXPECT_EQ(Ranges[2].Value, 0x0);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0xfe);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x4000)->Value, 0x0);
- // Add intersecting range and check value.
+ // Add intersecting range and check mapped value.
Ranges.insert(AddressRange(0x0, 0x6000), 0x1);
- EXPECT_EQ(Ranges.size(), 1u);
- EXPECT_EQ(Ranges.getRangeValueThatContains(0x1000)->second, 0x1);
+ EXPECT_EQ(Ranges.size(), 6u);
+ EXPECT_EQ(Ranges[0].Value, 0x1);
+ EXPECT_EQ(Ranges[1].Value, 0xfe);
+ EXPECT_EQ(Ranges[2].Value, 0xfc);
+ EXPECT_EQ(Ranges[3].Value, 0x1);
+ EXPECT_EQ(Ranges[4].Value, 0x0);
+ EXPECT_EQ(Ranges[5].Value, 0x1);
+ EXPECT_EQ(Ranges.getRangeThatContains(0x1000)->Value, 0xfe);
- // Check that values are correctly preserved for combined ranges.
+ // Check that mapped values are correctly preserved for combined ranges.
Ranges.clear();
Ranges.insert(AddressRange(0x0, 0xff), 0x1);
Ranges.insert(AddressRange(0x100, 0x1ff), 0x2);
Ranges.insert(AddressRange(0x200, 0x2ff), 0x3);
Ranges.insert(AddressRange(0x300, 0x3ff), 0x4);
- Ranges.insert(AddressRange(0x400, 0x4ff), 0x5);
Ranges.insert(AddressRange(0x500, 0x5ff), 0x6);
+ Ranges.insert(AddressRange(0x400, 0x4ff), 0x5);
Ranges.insert(AddressRange(0x600, 0x6ff), 0x7);
+ EXPECT_EQ(Ranges.size(), 7u);
Ranges.insert(AddressRange(0x150, 0x350), 0xff);
- EXPECT_EQ(Ranges.size(), 5u);
- EXPECT_EQ(Ranges[0].first, AddressRange(0x0, 0xff));
- EXPECT_EQ(Ranges[0].second, 0x1);
- EXPECT_EQ(Ranges[1].first, AddressRange(0x100, 0x3ff));
- EXPECT_EQ(Ranges[1].second, 0xff);
- EXPECT_EQ(Ranges[2].first, AddressRange(0x400, 0x4ff));
- EXPECT_EQ(Ranges[2].second, 0x5);
- EXPECT_EQ(Ranges[3].first, AddressRange(0x500, 0x5ff));
- EXPECT_EQ(Ranges[3].second, 0x6);
- EXPECT_EQ(Ranges[4].first, AddressRange(0x600, 0x6ff));
- EXPECT_EQ(Ranges[4].second, 0x7);
+ EXPECT_EQ(Ranges.size(), 9u);
+ EXPECT_EQ(Ranges[0].Range, AddressRange(0x0, 0xff));
+ EXPECT_EQ(Ranges[0].Value, 0x1);
+ EXPECT_EQ(Ranges[1].Range, AddressRange(0x100, 0x1ff));
+ EXPECT_EQ(Ranges[1].Value, 0x2);
+ EXPECT_EQ(Ranges[2].Range, AddressRange(0x1ff, 0x200));
+ EXPECT_EQ(Ranges[2].Value, 0xff);
+ EXPECT_EQ(Ranges[3].Range, AddressRange(0x200, 0x2ff));
+ EXPECT_EQ(Ranges[3].Value, 0x3);
+ EXPECT_EQ(Ranges[4].Range, AddressRange(0x2ff, 0x300));
+ EXPECT_EQ(Ranges[4].Value, 0xff);
+ EXPECT_EQ(Ranges[5].Range, AddressRange(0x300, 0x3ff));
+ EXPECT_EQ(Ranges[5].Value, 0x4);
+ EXPECT_EQ(Ranges[6].Range, AddressRange(0x400, 0x4ff));
+ EXPECT_EQ(Ranges[6].Value, 0x5);
+ EXPECT_EQ(Ranges[7].Range, AddressRange(0x500, 0x5ff));
+ EXPECT_EQ(Ranges[7].Value, 0x6);
+ EXPECT_EQ(Ranges[8].Range, AddressRange(0x600, 0x6ff));
+ EXPECT_EQ(Ranges[8].Value, 0x7);
+ Ranges.insert(AddressRange(0x650, 0x700), 0x8);
Ranges.insert(AddressRange(0x3ff, 0x400), 0x5);
- EXPECT_EQ(Ranges.size(), 4u);
- EXPECT_EQ(Ranges[0].first, AddressRange(0x0, 0xff));
- EXPECT_EQ(Ranges[0].second, 0x1);
- EXPECT_EQ(Ranges[1].first, AddressRange(0x100, 0x4ff));
- EXPECT_EQ(Ranges[1].second, 0x5);
- EXPECT_EQ(Ranges[2].first, AddressRange(0x500, 0x5ff));
- EXPECT_EQ(Ranges[2].second, 0x6);
- EXPECT_EQ(Ranges[3].first, AddressRange(0x600, 0x6ff));
- EXPECT_EQ(Ranges[3].second, 0x7);
+ Ranges.insert(AddressRange(0x0, 0x40), 0xee);
+ EXPECT_EQ(Ranges.size(), 11u);
+ EXPECT_EQ(Ranges[0].Range, AddressRange(0x0, 0xff));
+ EXPECT_EQ(Ranges[0].Value, 0x1);
+ EXPECT_EQ(Ranges[1].Range, AddressRange(0x100, 0x1ff));
+ EXPECT_EQ(Ranges[1].Value, 0x2);
+ EXPECT_EQ(Ranges[2].Range, AddressRange(0x1ff, 0x200));
+ EXPECT_EQ(Ranges[2].Value, 0xff);
+ EXPECT_EQ(Ranges[3].Range, AddressRange(0x200, 0x2ff));
+ EXPECT_EQ(Ranges[3].Value, 0x3);
+ EXPECT_EQ(Ranges[4].Range, AddressRange(0x2ff, 0x300));
+ EXPECT_EQ(Ranges[4].Value, 0xff);
+ EXPECT_EQ(Ranges[5].Range, AddressRange(0x300, 0x3ff));
+ EXPECT_EQ(Ranges[5].Value, 0x4);
+ EXPECT_EQ(Ranges[6].Range, AddressRange(0x3ff, 0x400));
+ EXPECT_EQ(Ranges[6].Value, 0x5);
+ EXPECT_EQ(Ranges[7].Range, AddressRange(0x400, 0x4ff));
+ EXPECT_EQ(Ranges[7].Value, 0x5);
+ EXPECT_EQ(Ranges[8].Range, AddressRange(0x500, 0x5ff));
+ EXPECT_EQ(Ranges[8].Value, 0x6);
+ EXPECT_EQ(Ranges[9].Range, AddressRange(0x600, 0x6ff));
+ EXPECT_EQ(Ranges[9].Value, 0x7);
+ EXPECT_EQ(Ranges[10].Range, AddressRange(0x6ff, 0x700));
+ EXPECT_EQ(Ranges[10].Value, 0x8);
+}
+
+TEST(AddressRangeTest, TestRangesMapRandom) {
+ AddressRangesMap Ranges;
+ size_t NumElements = 100;
+
+ std::srand(std::time(nullptr));
+
+ // Fill ranges. Use the same mapped value.
+ for (size_t Idx = 0; Idx < NumElements; Idx++) {
+ uint64_t Start = static_cast<uint64_t>(std::rand() % 1000);
+ uint64_t End = Start + static_cast<uint64_t>(std::rand() % 1000);
+ Ranges.insert({Start, End}, 0xffLL);
+ }
+
+ // Check ranges.
+ for (size_t Idx = 0; Idx + 1 < Ranges.size(); Idx++) {
+ // Check that ranges do not intersect.
+ EXPECT_FALSE(Ranges[Idx].Range.intersects(Ranges[Idx + 1].Range));
+
+ // Check that ranges are sorted; adjacent ranges are allowed here.
+ EXPECT_TRUE(Ranges[Idx].Range.end() <= Ranges[Idx + 1].Range.start());
+ }
+
+ Ranges.clear();
+ // Fill ranges. Use various mapped values.
+ for (size_t Idx = 0; Idx < NumElements; Idx++) {
+ uint64_t Start = static_cast<uint64_t>(std::rand() % 1000);
+ uint64_t End = Start + static_cast<uint64_t>(std::rand() % 1000);
+ int64_t Value = static_cast<int64_t>(std::rand() % 10);
+ Ranges.insert({Start, End}, Value);
+ }
+
+ // Check ranges.
+ for (size_t Idx = 0; Idx + 1 < Ranges.size(); Idx++) {
+ // Check that ranges do not intersect.
+ EXPECT_FALSE(Ranges[Idx].Range.intersects(Ranges[Idx + 1].Range));
+
+ // Check that ranges are sorted; adjacent ranges are allowed here.
+ EXPECT_TRUE(Ranges[Idx].Range.end() <= Ranges[Idx + 1].Range.start());
+ }
}
--
2.39.0.1.g6739ec1790


@@ -1,13 +0,0 @@
diff --git a/lld/MachO/LTO.cpp b/lld/MachO/LTO.cpp
index 2f5e9d06f396..7fbb41bef53d 100644
--- a/lld/MachO/LTO.cpp
+++ b/lld/MachO/LTO.cpp
@@ -65,8 +65,6 @@ static lto::Config createConfig() {
pm.add(createObjCARCContractPass());
};
- c.AlwaysEmitRegularLTOObj = !config->ltoObjPath.empty();
-
c.TimeTraceEnabled = config->timeTraceEnabled;
c.TimeTraceGranularity = config->timeTraceGranularity;
c.OptLevel = config->ltoo;


@@ -1,180 +0,0 @@
From c8a5013045b5aff8e45418925688ca670545980f Mon Sep 17 00:00:00 2001
From: Mike Hommey <mh@glandium.org>
Date: Fri, 18 Mar 2022 17:58:28 +0900
Subject: [PATCH] Revert "[lsan] Move out suppression of invalid PCs from
StopTheWorld"
This reverts commit f86deb18cab6479a0961ade3807e4729f3a27bdf
because it permafails a sizable number of ASan test jobs, where the
worker would die without even leaving any logs.
---
compiler-rt/lib/lsan/lsan_common.cpp | 108 +++++++++++++++++----------
1 file changed, 67 insertions(+), 41 deletions(-)
diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
index 51218770d6dc..0a69b010879b 100644
--- a/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -83,11 +83,9 @@ class LeakSuppressionContext {
SuppressionContext context;
bool suppressed_stacks_sorted = true;
InternalMmapVector<u32> suppressed_stacks;
- const LoadedModule *suppress_module = nullptr;
- void LazyInit();
Suppression *GetSuppressionForAddr(uptr addr);
- bool SuppressInvalid(const StackTrace &stack);
+ void LazyInit();
bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
public:
@@ -138,8 +136,6 @@ void LeakSuppressionContext::LazyInit() {
if (&__lsan_default_suppressions)
context.Parse(__lsan_default_suppressions());
context.Parse(kStdSuppressions);
- if (flags()->use_tls && flags()->use_ld_allocations)
- suppress_module = GetLinker();
}
}
@@ -165,13 +161,6 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
return s;
}
-static uptr GetCallerPC(const StackTrace &stack) {
- // The top frame is our malloc/calloc/etc. The next frame is the caller.
- if (stack.size >= 2)
- return stack.trace[1];
- return 0;
-}
-
# if SANITIZER_APPLE
// Objective-C class data pointers are stored with flags in the low bits, so
// they need to be transformed back into something that looks like a pointer.
@@ -183,34 +172,6 @@ static inline void *MaybeTransformPointer(void *p) {
}
# endif
-// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
-// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
-// modules accounting etc.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-// On all other platforms, this simply checks to ensure that the caller pc is
-// valid before reporting chunks as leaked.
-bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
- uptr caller_pc = GetCallerPC(stack);
- // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
- // it as reachable, as we can't properly report its allocation stack anyway.
- return !caller_pc ||
- (suppress_module && suppress_module->containsAddress(caller_pc));
-}
-
bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
uptr hit_count, uptr total_size) {
for (uptr i = 0; i < stack.size; i++) {
@@ -229,7 +190,7 @@ bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
uptr total_size) {
LazyInit();
StackTrace stack = StackDepotGet(stack_trace_id);
- if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
+ if (!SuppressByRule(stack, hit_count, total_size))
return false;
suppressed_stacks_sorted = false;
suppressed_stacks.push_back(stack_trace_id);
@@ -600,6 +561,68 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
}
}
+static uptr GetCallerPC(const StackTrace &stack) {
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+struct InvalidPCParam {
+ Frontier *frontier;
+ bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+ u32 stack_id = m.stack_trace_id();
+ uptr caller_pc = 0;
+ if (stack_id > 0)
+ caller_pc = GetCallerPC(StackDepotGet(stack_id));
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ if (caller_pc == 0 || (param->skip_linker_allocations &&
+ GetLinker()->containsAddress(caller_pc))) {
+ m.set_tag(kIgnored);
+ param->frontier->push_back(chunk);
+ }
+ }
+}
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+static void ProcessPC(Frontier *frontier) {
+ InvalidPCParam arg;
+ arg.frontier = frontier;
+ arg.skip_linker_allocations =
+ flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+ ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
Frontier *frontier, tid_t caller_tid,
@@ -616,6 +639,9 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
ProcessRootRegions(frontier);
FloodFillTag(frontier, kReachable);
+ CHECK_EQ(0, frontier->size());
+ ProcessPC(frontier);
+
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
--
2.35.0.1.g829a698654


@@ -1,172 +0,0 @@
From cf00b30288c4c81b2c6a5af01c38f236148777a0 Mon Sep 17 00:00:00 2001
From: Mike Hommey <mh@glandium.org>
Date: Tue, 28 Mar 2023 06:13:36 +0900
Subject: [PATCH] Revert "[Passes][VectorCombine] enable early run generally
and try load folds"
This reverts commit 163bb6d64e5f1220777c3ec2a8b58c0666a74d91.
It causes various reftest regressions.
---
llvm/lib/Passes/PassBuilderPipelines.cpp | 7 ++++---
llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 8 ++------
llvm/test/Other/new-pm-defaults.ll | 2 +-
llvm/test/Other/new-pm-thinlto-defaults.ll | 1 -
.../Other/new-pm-thinlto-postlink-pgo-defaults.ll | 1 -
.../new-pm-thinlto-postlink-samplepgo-defaults.ll | 1 -
.../Other/new-pm-thinlto-prelink-pgo-defaults.ll | 1 -
.../new-pm-thinlto-prelink-samplepgo-defaults.ll | 1 -
.../PhaseOrdering/X86/vec-load-combine.ll | 15 +++++++++++----
9 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index eed29c25714b..b925448cd6c0 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -611,9 +611,10 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// Delete small array after loop unroll.
FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
- // Try vectorization/scalarization transforms that are both improvements
- // themselves and can allow further folds with GVN and InstCombine.
- FPM.addPass(VectorCombinePass(/*TryEarlyFoldsOnly=*/true));
+ // The matrix extension can introduce large vector operations early, which can
+ // benefit from running vector-combine early on.
+ if (EnableMatrix)
+ FPM.addPass(VectorCombinePass(/*TryEarlyFoldsOnly=*/true));
// Eliminate redundancies.
FPM.addPass(MergedLoadStoreMotionPass());
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 2e489757ebc1..810a9f92bb7a 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -1720,12 +1720,6 @@ bool VectorCombine::run() {
// dispatching to folding functions if there's no chance of matching.
if (IsFixedVectorType) {
switch (Opcode) {
- case Instruction::InsertElement:
- MadeChange |= vectorizeLoadInsert(I);
- break;
- case Instruction::ShuffleVector:
- MadeChange |= widenSubvectorLoad(I);
- break;
case Instruction::Load:
MadeChange |= scalarizeLoadExtract(I);
break;
@@ -1754,9 +1748,11 @@ bool VectorCombine::run() {
if (IsFixedVectorType) {
switch (Opcode) {
case Instruction::InsertElement:
+ MadeChange |= vectorizeLoadInsert(I);
MadeChange |= foldInsExtFNeg(I);
break;
case Instruction::ShuffleVector:
+ MadeChange |= widenSubvectorLoad(I);
MadeChange |= foldShuffleOfBinops(I);
MadeChange |= foldSelectShuffle(I);
break;
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index 13612c3bb459..5f84d28af4a6 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -186,7 +186,7 @@
; CHECK-O-NEXT: Running pass: LoopFullUnrollPass
; CHECK-EP-LOOP-END-NEXT: Running pass: NoOpLoopPass
; CHECK-O-NEXT: Running pass: SROAPass on foo
-; CHECK-O23SZ-NEXT: Running pass: VectorCombinePass
+; CHECK-MATRIX: Running pass: VectorCombinePass
; CHECK-O23SZ-NEXT: Running pass: MergedLoadStoreMotionPass
; CHECK-O23SZ-NEXT: Running pass: GVNPass
; CHECK-O23SZ-NEXT: Running analysis: MemoryDependenceAnalysis
diff --git a/llvm/test/Other/new-pm-thinlto-defaults.ll b/llvm/test/Other/new-pm-thinlto-defaults.ll
index 3f5d2d5b153d..ea07128c9f6a 100644
--- a/llvm/test/Other/new-pm-thinlto-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-defaults.ll
@@ -159,7 +159,6 @@
; CHECK-O-NEXT: Running pass: LoopDeletionPass
; CHECK-O-NEXT: Running pass: LoopFullUnrollPass
; CHECK-O-NEXT: Running pass: SROAPass on foo
-; CHECK-O23SZ-NEXT: Running pass: VectorCombinePass
; CHECK-O23SZ-NEXT: Running pass: MergedLoadStoreMotionPass
; CHECK-O23SZ-NEXT: Running pass: GVNPass
; CHECK-O23SZ-NEXT: Running analysis: MemoryDependenceAnalysis
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index 29021ceace54..43e943cb6011 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -121,7 +121,6 @@
; CHECK-O-NEXT: Running pass: LoopDeletionPass
; CHECK-O-NEXT: Running pass: LoopFullUnrollPass
; CHECK-O-NEXT: Running pass: SROAPass on foo
-; CHECK-O23SZ-NEXT: Running pass: VectorCombinePass
; CHECK-O23SZ-NEXT: Running pass: MergedLoadStoreMotionPass
; CHECK-O23SZ-NEXT: Running pass: GVNPass
; CHECK-O23SZ-NEXT: Running analysis: MemoryDependenceAnalysis
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index daf3141a1f2c..78914d1c23b2 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -130,7 +130,6 @@
; CHECK-O-NEXT: Running pass: LoopDeletionPass
; CHECK-O-NEXT: Running pass: LoopFullUnrollPass
; CHECK-O-NEXT: Running pass: SROAPass on foo
-; CHECK-O23SZ-NEXT: Running pass: VectorCombinePass
; CHECK-O23SZ-NEXT: Running pass: MergedLoadStoreMotionPass
; CHECK-O23SZ-NEXT: Running pass: GVNPass
; CHECK-O23SZ-NEXT: Running analysis: MemoryDependenceAnalysis
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
index bfe80902f806..5b62ba39add3 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
@@ -160,7 +160,6 @@
; CHECK-O-NEXT: Running pass: LoopDeletionPass
; CHECK-O-NEXT: Running pass: LoopFullUnrollPass
; CHECK-O-NEXT: Running pass: SROAPass on foo
-; CHECK-O23SZ-NEXT: Running pass: VectorCombinePass
; CHECK-O23SZ-NEXT: Running pass: MergedLoadStoreMotionPass
; CHECK-O23SZ-NEXT: Running pass: GVNPass
; CHECK-O23SZ-NEXT: Running analysis: MemoryDependenceAnalysis
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
index c7daf7aa46b1..17475423d696 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
@@ -124,7 +124,6 @@
; CHECK-O-NEXT: Running pass: IndVarSimplifyPass
; CHECK-O-NEXT: Running pass: LoopDeletionPass
; CHECK-O-NEXT: Running pass: SROAPass on foo
-; CHECK-O23SZ-NEXT: Running pass: VectorCombinePass
; CHECK-O23SZ-NEXT: Running pass: MergedLoadStoreMotionPass
; CHECK-O23SZ-NEXT: Running pass: GVNPass
; CHECK-O23SZ-NEXT: Running analysis: MemoryDependenceAnalysis
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
index 77cbc70ff369..dd7164febea4 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
@@ -12,13 +12,20 @@ $getAt = comdat any
define dso_local noundef <4 x float> @ConvertVectors_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %0) #0 {
; SSE-LABEL: @ConvertVectors_ByRef(
; SSE-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[TMP0:%.*]], align 16
-; SSE-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
-; SSE-NEXT: ret <4 x float> [[TMP3]]
+; SSE-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x float], ptr [[TMP0]], i64 0, i64 1
+; SSE-NEXT: [[TMP4:%.*]] = load <2 x float>, ptr [[TMP3]], align 4
+; SSE-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+; SSE-NEXT: [[TMP6:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 4, i32 5, i32 undef>
+; SSE-NEXT: [[TMP7:%.*]] = shufflevector <4 x float> [[TMP6]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 5>
+; SSE-NEXT: ret <4 x float> [[TMP7]]
;
; AVX-LABEL: @ConvertVectors_ByRef(
; AVX-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[TMP0:%.*]], align 16
-; AVX-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
-; AVX-NEXT: ret <4 x float> [[TMP3]]
+; AVX-NEXT: [[TMP3:%.*]] = getelementptr inbounds [4 x float], ptr [[TMP0]], i64 0, i64 2
+; AVX-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP3]], align 8
+; AVX-NEXT: [[TMP5:%.*]] = insertelement <4 x float> [[TMP2]], float [[TMP4]], i64 2
+; AVX-NEXT: [[TMP6:%.*]] = insertelement <4 x float> [[TMP5]], float [[TMP4]], i64 3
+; AVX-NEXT: ret <4 x float> [[TMP6]]
;
%2 = alloca ptr, align 8
%3 = alloca <4 x float>, align 16
--
2.39.0.1.g6739ec1790

@@ -354,13 +354,6 @@ clang-14:
repo: https://github.com/llvm/llvm-project
revision: 4bc1d0b51c8e488d78ab69c8b19cfbcd1f7db6a4
clang-16:
description: clang 16.0.6 source code
fetch:
type: git
repo: https://github.com/llvm/llvm-project
revision: 7cbf1a2591520c2491aa35339f227775f4d3adf6
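As an aside, every entry in this fetch kind shares the shape of the removed clang-16 block above: a description, a git fetch of llvm-project, and a pinned revision. A minimal sketch of such an entry, assuming the same layout (the version number and hash below are placeholders, not values from this commit):

clang-NN:
  description: clang NN.0.0 source code
  fetch:
    type: git
    repo: https://github.com/llvm/llvm-project
    revision: 0000000000000000000000000000000000000000  # placeholder, not a real commit

Dropping the clang-16 entry removes the source pin, which is why every task that fetched clang-16 is deleted alongside it in the hunks that follow.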
clang-17:
description: clang 17.0.1 source code
fetch:

@@ -111,366 +111,6 @@ macosx64-clang-14-raw:
- linux64-clang-14-stage1
- macosx64-sdk-toolchain
linux64-clang-16-mingw-x86:
description: "MinGW-Clang 16 x86 toolchain build"
treeherder:
symbol: TMW(clang-16-x86)
worker-type: b-linux-gcp
run:
script: build-clang-mingw.sh
arguments:
- 'x86'
resources:
- 'taskcluster/scripts/misc/mingw-*.patch'
toolchain-artifact: public/build/clangmingw.tar.zst
fetches:
fetch:
- clang-16
- mingw-w64
- llvm-mingw
- gcc-9.5.0
toolchain:
- linux64-clang-16
linux64-clang-16-mingw-x64:
description: "MinGW-Clang 16 x64 toolchain build"
treeherder:
symbol: TMW(clang-16-x64)
tier: 1
worker-type: b-linux-gcp
run:
script: build-clang-mingw.sh
arguments:
- 'x64'
resources:
- 'taskcluster/scripts/misc/mingw-*.patch'
toolchain-artifact: public/build/clangmingw.tar.zst
fetches:
fetch:
- clang-16
- mingw-w64
- llvm-mingw
- gcc-9.5.0
toolchain:
- linux64-clang-16
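Both MinGW tasks are instances of one template in which only the target architecture argument changes; each cross-compiles a Windows-hosted clang using the repacked linux64 clang of the same version. A sketch of that shape (N and ARCH are placeholders):

linux64-clang-N-mingw-ARCH:
  description: "MinGW-Clang N ARCH toolchain build"
  run:
    script: build-clang-mingw.sh
    arguments:
      - 'ARCH'                           # 'x86' or 'x64'
    resources:
      - 'taskcluster/scripts/misc/mingw-*.patch'
  toolchain-artifact: public/build/clangmingw.tar.zst
  fetches:
    fetch: [clang-N, mingw-w64, llvm-mingw, gcc-9.5.0]
    toolchain: [linux64-clang-N]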
linux64-clang-16-stage1:
description: "Clang 16 toolchain build"
treeherder:
symbol: TL(clang-16-stage1)
run:
using: toolchain-script
script: build-clang.sh
arguments:
- 'build/build-clang/linux64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/1stage.json'
resources:
- 'build/build-clang/linux64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/1stage.json'
toolchain-artifact: public/build/clang.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-toolchain-sysroot
linux64-clang-16-profile:
description: "Clang 16 toolchain build"
treeherder:
symbol: TL(clang-16-profile)
run:
using: toolchain-script
script: build-clang.sh
arguments:
- 'build/build-clang/linux64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-stage-1.json'
- 'build/build-clang/profile.json'
resources:
- 'build/build-clang/linux64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-stage-1.json'
- 'build/build-clang/profile.json'
toolchain-artifact: public/build/merged.profdata
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-x64-compiler-rt-16
- linux64-toolchain-sysroot
linux64-clang-16-raw:
description: "Clang 16 toolchain build"
treeherder:
symbol: TL(clang-16-raw)
run:
script: build-clang.sh
arguments:
- 'build/build-clang/linux64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-3-stages.json'
- 'build/build-clang/4stages-pgo.json'
resources:
- 'build/build-clang/linux64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-3-stages.json'
- 'build/build-clang/4stages-pgo.json'
toolchain-artifact: public/build/clang.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-clang-16-profile
- linux64-toolchain-sysroot
linux64-clang-16:
description: "Clang 16 toolchain build"
attributes:
local-toolchain: true
treeherder:
symbol: TL(clang-16)
worker-type: b-linux-gcp
worker:
max-run-time: 600
run:
script: repack-clang.sh
toolchain-artifact: public/build/clang.tar.zst
fetches:
toolchain:
- linux64-cctools-port
- linux64-clang-16-raw
- android-aarch64-compiler-rt-16
- android-aarch64-libunwind-16
- android-arm-compiler-rt-16
- android-arm-libunwind-16
- android-x64-compiler-rt-16
- android-x64-libunwind-16
- android-x86-compiler-rt-16
- android-x86-libunwind-16
- linux64-aarch64-compiler-rt-16
- linux64-x64-compiler-rt-16
- linux64-x86-compiler-rt-16
- macosx64-aarch64-compiler-rt-16
- macosx64-x64-compiler-rt-16
- wasm32-wasi-compiler-rt-16
- win32-compiler-rt-16
- win64-compiler-rt-16
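The linux64 tasks above form a single pipeline, which the surviving clang-17 family keeps intact: a one-stage bootstrap compiler, a PGO profiling run, a full profile-guided build, and a final repack that bundles per-target runtime libraries. A condensed sketch of the dependency shape, with run and worker settings elided (illustrative only; N stands for a version):

# Illustrative dependency shape of a clang-N toolchain family.
linux64-clang-N-stage1: {}                    # single-stage bootstrap build
linux64-clang-N-profile:                      # collects PGO profile data
  fetches: {toolchain: [linux64-clang-N-stage1]}
linux64-clang-N-raw:                          # full PGO-optimized build
  fetches: {toolchain: [linux64-clang-N-stage1, linux64-clang-N-profile]}
linux64-clang-N:                              # repack-clang.sh merges the raw clang
  fetches: {toolchain: [linux64-clang-N-raw]} # with per-target compiler-rt/libunwind

The macosx64 and win64 variants below follow the same pattern, swapping in the platform's json config and its SDK or VS toolchain dependencies.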
macosx64-clang-16-raw:
description: "Clang 16 toolchain build"
treeherder:
symbol: TM(clang-16-raw)
worker-type: b-linux-large-gcp
worker:
max-run-time: 3600
run:
script: build-clang.sh
arguments:
- 'build/build-clang/macosx64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-3-stages.json'
- 'build/build-clang/4stages-pgo.json'
resources:
- 'build/build-clang/macosx64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-3-stages.json'
- 'build/build-clang/4stages-pgo.json'
toolchain-artifact: public/build/clang.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-clang-16-profile
- macosx64-sdk-toolchain
- macosx64-x64-compiler-rt-16
macosx64-clang-16:
description: "Clang 16 toolchain repack with MacOS Compiler RT libs"
attributes:
local-toolchain: true
treeherder:
symbol: TM(clang-16)
worker-type: b-linux-gcp
worker:
max-run-time: 600
run:
script: repack-clang.sh
toolchain-artifact: public/build/clang.tar.zst
fetches:
toolchain:
- linux64-cctools-port
- macosx64-clang-16-raw
- android-aarch64-compiler-rt-16
- android-aarch64-libunwind-16
- android-arm-compiler-rt-16
- android-arm-libunwind-16
- android-x64-compiler-rt-16
- android-x64-libunwind-16
- android-x86-compiler-rt-16
- android-x86-libunwind-16
- linux64-aarch64-compiler-rt-16
- linux64-x64-compiler-rt-16
- linux64-x86-compiler-rt-16
- macosx64-aarch64-compiler-rt-16
- macosx64-x64-compiler-rt-16
- wasm32-wasi-compiler-rt-16
- win32-compiler-rt-16
- win64-compiler-rt-16
macosx64-aarch64-clang-16-raw:
description: "Clang 16 toolchain build"
treeherder:
symbol: TM(clang-16-aarch64-raw)
worker-type: b-linux-large-gcp
worker:
max-run-time: 3600
run:
script: build-clang.sh
arguments:
- 'build/build-clang/macosx64.json'
- 'build/build-clang/macosx64-aarch64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-3-stages.json'
- 'build/build-clang/4stages-pgo.json'
resources:
- 'build/build-clang/macosx64.json'
- 'build/build-clang/macosx64-aarch64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-3-stages.json'
- 'build/build-clang/4stages-pgo.json'
toolchain-artifact: public/build/clang.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-clang-16-profile
- macosx64-sdk-toolchain
- macosx64-aarch64-compiler-rt-16
macosx64-aarch64-clang-16:
description: "Clang 16 toolchain repack with MacOS Compiler RT libs"
attributes:
local-toolchain: true
treeherder:
symbol: TM(clang-16-aarch64)
worker-type: b-linux-gcp
worker:
max-run-time: 600
run:
script: repack-clang.sh
toolchain-artifact: public/build/clang.tar.zst
fetches:
toolchain:
- linux64-cctools-port
- macosx64-aarch64-clang-16-raw
- android-aarch64-compiler-rt-16
- android-aarch64-libunwind-16
- android-arm-compiler-rt-16
- android-arm-libunwind-16
- android-x64-compiler-rt-16
- android-x64-libunwind-16
- android-x86-compiler-rt-16
- android-x86-libunwind-16
- linux64-aarch64-compiler-rt-16
- linux64-x64-compiler-rt-16
- linux64-x86-compiler-rt-16
- macosx64-aarch64-compiler-rt-16
- macosx64-x64-compiler-rt-16
- wasm32-wasi-compiler-rt-16
- win32-compiler-rt-16
- win64-compiler-rt-16
win64-clang-16-stage1:
description: "Clang-cl 16 toolchain build stage 1"
treeherder:
symbol: TW64(clang-16-stage1)
run:
script: build-clang.sh
arguments:
- 'build/build-clang/win64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/1stage.json'
resources:
- 'build/build-clang/win64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/1stage.json'
toolchain-artifact: public/build/clang.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- vs-toolchain
win64-clang-16-raw:
description: "Clang-cl 16 toolchain build"
treeherder:
symbol: TW64(clang-16-raw)
worker-type: b-win2022
worker:
max-run-time: 9000
run:
script: build-clang.sh
arguments:
- 'build/build-clang/win64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-stage-1-win64.json'
- 'build/build-clang/4stages-pgo.json'
resources:
- 'build/build-clang/win64.json'
- 'build/build-clang/clang-16.json'
- 'build/build-clang/skip-stage-1-win64.json'
- 'build/build-clang/4stages-pgo.json'
toolchain-artifact: public/build/clang.tar.zst
fetches:
fetch:
- clang-16
- cmake
- ninja
toolchain:
- win64-clang-16-stage1
- win64-compiler-rt-16
- vs-toolchain
win64-clang-16:
description: "Clang-cl 16 toolchain build"
attributes:
local-toolchain: true
treeherder:
symbol: TW64(clang-16)
worker-type: b-linux-gcp
worker:
max-run-time: 600
run:
script: repack-clang.sh
toolchain-artifact: public/build/clang.tar.zst
fetches:
toolchain:
- linux64-cctools-port
- win64-clang-16-raw
- android-aarch64-compiler-rt-16
- android-aarch64-libunwind-16
- android-arm-compiler-rt-16
- android-arm-libunwind-16
- android-x64-compiler-rt-16
- android-x64-libunwind-16
- android-x86-compiler-rt-16
- android-x86-libunwind-16
- linux64-aarch64-compiler-rt-16
- linux64-x64-compiler-rt-16
- linux64-x86-compiler-rt-16
- macosx64-aarch64-compiler-rt-16
- macosx64-x64-compiler-rt-16
- wasm32-wasi-compiler-rt-16
- win32-compiler-rt-16
- win64-compiler-rt-16
linux64-clang-17-mingw-x86:
description: "MinGW-Clang 17 x86 toolchain build"
treeherder:

@@ -29,220 +29,6 @@ wasm32-wasi-compiler-rt-8.0:
toolchain:
- linux64-clang-8.0-raw
android-aarch64-compiler-rt-16:
description: "android aarch64 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TA(aarch64-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-aarch64-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
android-arm-compiler-rt-16:
description: "android arm Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TA(arm-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-armv7-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
android-x86-compiler-rt-16:
description: "android x86 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TA(x86-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-i686-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
android-x64-compiler-rt-16:
description: "android x64 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TA(x64-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-x86_64-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
linux64-x86-compiler-rt-16:
description: "Linux x86 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TL(x86-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-i686-unknown-linux-gnu.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- sysroot-i686-linux-gnu
linux64-x64-compiler-rt-16:
description: "Linux x64 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TL(x64-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-x86_64-unknown-linux-gnu.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- sysroot-x86_64-linux-gnu
linux64-aarch64-compiler-rt-16:
description: "Linux aarch64 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TL(aarch64-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-aarch64-unknown-linux-gnu.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- sysroot-aarch64-linux-gnu
macosx64-x64-compiler-rt-16:
description: "macOS x64 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TM(x64-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-x86_64-apple-darwin.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- macosx64-sdk-toolchain
macosx64-aarch64-compiler-rt-16:
description: "macOS aarch64 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TM(aarch64-crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-aarch64-apple-darwin.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- macosx64-sdk-toolchain
win32-compiler-rt-16:
description: "win32 x86 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TW32(crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-i686-pc-windows-msvc.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- vs-toolchain
win64-compiler-rt-16:
description: "win64 x64 Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TW64(crt-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/compiler-rt-x86_64-pc-windows-msvc.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- linux64-clang-16-stage1
- vs-toolchain
wasm32-wasi-compiler-rt-16:
description: "wasm32-wasi Compiler-rt for Clang 16 toolchain build"
treeherder:
symbol: TL(wasi-crt-16)
worker-type: b-linux-xlarge-gcp
run:
script: build-compiler-rt-wasi.sh
toolchain-artifact: public/build/compiler-rt-wasm32-wasi.tar.zst
toolchain-alias: wasm32-wasi-compiler-rt
fetches:
fetch:
- clang-16
- wasi-sdk
toolchain:
- linux64-clang-16-stage1
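Each compiler-rt task in this kind instantiates one template per target: build compiler-rt with the matching stage1 clang against that target's sysroot, Android NDK, macOS SDK, or Visual Studio toolchain, and publish a per-triple archive. A sketch of the shared shape (TARGET, TRIPLE, and N are placeholders):

TARGET-compiler-rt-N:
  description: "TARGET compiler-rt for Clang N toolchain build"
  run:
    arguments:
      - build/build-clang/clang-N.json   # pins the clang source and patch set
    resources:
      - build/build-clang/clang-N.json
      - taskcluster/scripts/misc/build-llvm-common.sh
  toolchain-artifact: public/build/compiler-rt-TRIPLE.tar.zst
  fetches:
    fetch:
      - clang-N
    toolchain:
      - linux64-clang-N-stage1           # bootstrap compiler from the same family
      - sysroot-ndk-or-sdk-task          # placeholder; varies by target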
android-aarch64-compiler-rt-17:
description: "android aarch64 Compiler-rt for Clang 17 toolchain build"
treeherder:

@@ -10,82 +10,6 @@ job-defaults:
using: toolchain-script
script: build-libunwind.sh
android-aarch64-libunwind-16:
description: "android aarch64 libunwind for Clang 16 toolchain build"
treeherder:
symbol: TA(aarch64-unwind-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/libunwind-aarch64-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- android-aarch64-compiler-rt-16
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
android-arm-libunwind-16:
description: "android arm libunwind for Clang 16 toolchain build"
treeherder:
symbol: TA(arm-unwind-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/libunwind-armv7-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- android-arm-compiler-rt-16
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
android-x86-libunwind-16:
description: "android x86 libunwind for Clang 16 toolchain build"
treeherder:
symbol: TA(x86-unwind-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/libunwind-i686-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- android-x86-compiler-rt-16
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
android-x64-libunwind-16:
description: "android x64 libunwind for Clang 16 toolchain build"
treeherder:
symbol: TA(x64-unwind-16)
run:
arguments:
- build/build-clang/clang-16.json
resources:
- build/build-clang/clang-16.json
- taskcluster/scripts/misc/build-llvm-common.sh
toolchain-artifact: public/build/libunwind-x86_64-linux-android.tar.zst
fetches:
fetch:
- clang-16
toolchain:
- android-x64-compiler-rt-16
- linux64-clang-16-stage1
- linux64-android-ndk-linux-repack
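The libunwind kind mirrors the compiler-rt kind for the Android targets: the build script comes from the job-defaults block at the top of this hunk, and each task additionally pulls in the matching compiler-rt artifact. A sketch under those assumptions (ARCH, TRIPLE, and N are placeholders):

android-ARCH-libunwind-N:
  run:                                   # 'using' and 'script' come from job-defaults
    arguments:
      - build/build-clang/clang-N.json
  toolchain-artifact: public/build/libunwind-TRIPLE.tar.zst
  fetches:
    fetch:
      - clang-N
    toolchain:
      - android-ARCH-compiler-rt-N       # matching runtime from the same family
      - linux64-clang-N-stage1
      - linux64-android-ndk-linux-repack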
android-aarch64-libunwind-17:
description: "android aarch64 libunwind for Clang 17 toolchain build"
treeherder:

@@ -77,23 +77,6 @@ sysroot-wasm32-wasi-clang-8.0:
- linux64-clang-8.0
- wasm32-wasi-compiler-rt-8.0
sysroot-wasm32-wasi-clang-16:
description: "Sysroot for wasi"
attributes:
local-toolchain: true
treeherder:
symbol: TL(sysroot-wasi-16)
run:
script: build-sysroot-wasi.sh
toolchain-artifact: public/build/sysroot-wasm32-wasi.tar.zst
fetches:
fetch:
- clang-16
- wasi-sdk
toolchain:
- linux64-clang-16-stage1
- wasm32-wasi-compiler-rt-16
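Last, the wasi sysroot tasks tie these pieces together: each clang-N variant combines the clang-N sources and the wasi-sdk fetch with the matching wasm32-wasi compiler-rt built by the stage1 compiler, and the clang-17 entry that remains keeps the same shape. A condensed sketch (N is a placeholder):

sysroot-wasm32-wasi-clang-N:
  description: "Sysroot for wasi"
  run:
    script: build-sysroot-wasi.sh
  toolchain-artifact: public/build/sysroot-wasm32-wasi.tar.zst
  fetches:
    fetch:
      - clang-N
      - wasi-sdk
    toolchain:
      - linux64-clang-N-stage1
      - wasm32-wasi-compiler-rt-N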
sysroot-wasm32-wasi-clang-17:
description: "Sysroot for wasi"
attributes: