Update Source to objc4-818.2

This commit is contained in:
Thomas A 2022-03-31 21:15:07 -07:00
parent c5d1e278e2
commit 1243e13d05
161 changed files with 7983 additions and 2321 deletions

View File

@ -19,8 +19,6 @@ __objc_update_stubs_in_mach_header
_sel_init
___sel_registerName
__objc_search_builtins
__ZNK8objc_opt13objc_selopt_t3getEPKc
__ZNK8objc_opt13objc_selopt_t4hashEPKc
_sel_registerName
_arr_init
__ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE4initEj

View File

@ -391,6 +391,14 @@ void dosect(uint8_t *start, macho_section<P> *sect)
sect->set_sectname("__objc_init_func");
if (debug) printf("disabled __mod_init_func section\n");
}
if (segnameStartsWith(sect->segname(), "__TEXT") &&
sectnameEquals(sect->sectname(), "__init_offsets"))
{
// section type 0 is S_REGULAR
sect->set_flags(sect->flags() & ~SECTION_TYPE);
sect->set_sectname("__objc_init_offs");
if (debug) printf("disabled __init_offsets section\n");
}
if (segnameStartsWith(sect->segname(), "__DATA") &&
sectnameEquals(sect->sectname(), "__mod_term_func"))
{

0
objc.sln Normal file → Executable file
View File

View File

@ -7,6 +7,17 @@
objects = {
/* Begin PBXAggregateTarget section */
6EF877EF23263D7000963DBB /* objc_executables */ = {
isa = PBXAggregateTarget;
buildConfigurationList = 6EF877F223263D7000963DBB /* Build configuration list for PBXAggregateTarget "objc_executables" */;
buildPhases = (
);
dependencies = (
6EF877F423263D8000963DBB /* PBXTargetDependency */,
);
name = objc_executables;
productName = "objc-executables";
};
834F9B01212E560100F95A54 /* objc4_tests */ = {
isa = PBXAggregateTarget;
buildConfigurationList = 834F9B04212E560200F95A54 /* Build configuration list for PBXAggregateTarget "objc4_tests" */;
@ -45,6 +56,13 @@
6EACB842232C97A400CE9176 /* objc-zalloc.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EACB841232C97A400CE9176 /* objc-zalloc.h */; };
6EACB844232C97B900CE9176 /* objc-zalloc.mm in Sources */ = {isa = PBXBuildFile; fileRef = 6EACB843232C97B900CE9176 /* objc-zalloc.mm */; };
6ECD0B1F2244999E00910D88 /* llvm-DenseSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 6ECD0B1E2244999E00910D88 /* llvm-DenseSet.h */; };
6EF877DA2325D62600963DBB /* objcdt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 6EF877D92325D62600963DBB /* objcdt.mm */; };
6EF877DE2325D79000963DBB /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; };
6EF877E02325D92E00963DBB /* CoreSymbolication.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877DF2325D92E00963DBB /* CoreSymbolication.framework */; };
6EF877E22325D93200963DBB /* Symbolication.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877E12325D93200963DBB /* Symbolication.framework */; };
6EF877E52325FAC400963DBB /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877E42325FAC400963DBB /* Foundation.framework */; };
6EF877E82326184000963DBB /* json.mm in Sources */ = {isa = PBXBuildFile; fileRef = 6EF877E72326184000963DBB /* json.mm */; };
6EF877EC232635A700963DBB /* objcdt.1 in Install Manpages */ = {isa = PBXBuildFile; fileRef = 6EF877EA232633CC00963DBB /* objcdt.1 */; };
7213C36321FA7C730090A271 /* NSObject-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 7213C36221FA7C730090A271 /* NSObject-internal.h */; settings = {ATTRIBUTES = (Private, ); }; };
7593EC58202248E50046AB96 /* objc-object.h in Headers */ = {isa = PBXBuildFile; fileRef = 7593EC57202248DF0046AB96 /* objc-object.h */; };
75A9504F202BAA0600D7D56F /* objc-locks-new.h in Headers */ = {isa = PBXBuildFile; fileRef = 75A9504E202BAA0300D7D56F /* objc-locks-new.h */; };
@ -128,12 +146,22 @@
83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83F550DF155E030800E95D3B /* objc-cache-old.mm */; };
87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; };
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; };
C22F5208230EF38B001BFE14 /* objc-ptrauth.h in Headers */ = {isa = PBXBuildFile; fileRef = C22F5207230EF38B001BFE14 /* objc-ptrauth.h */; };
C2E6D3FC2225DCF00059DFAA /* DenseMapExtras.h in Headers */ = {isa = PBXBuildFile; fileRef = C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */; };
C2EB731D23D8A38A0040672B /* dummy-library-mac-i386.c in Sources */ = {isa = PBXBuildFile; fileRef = C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */; };
E8923DA5116AB2820071B552 /* objc-block-trampolines.mm in Sources */ = {isa = PBXBuildFile; fileRef = E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */; };
E934A9F123E996D00088F26F /* objc4.plist in CopyFiles */ = {isa = PBXBuildFile; fileRef = E934A9EF23E9967D0088F26F /* objc4.plist */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; };
F9BCC71B205C68E800DD9AFC /* objc-blocktramps-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 8379996D13CBAF6F007C2B5F /* objc-blocktramps-arm64.s */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
6EF877F323263D8000963DBB /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
proxyType = 1;
remoteGlobalIDString = 6EF877D62325D62600963DBB;
remoteInfo = objcdt;
};
837F67AC1A771F6E004D34FA /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
@ -150,6 +178,30 @@
};
/* End PBXContainerItemProxy section */
/* Begin PBXCopyFilesBuildPhase section */
6EF877D52325D62600963DBB /* Install Manpages */ = {
isa = PBXCopyFilesBuildPhase;
buildActionMask = 2147483647;
dstPath = /usr/local/share/man/man1/;
dstSubfolderSpec = 0;
files = (
6EF877EC232635A700963DBB /* objcdt.1 in Install Manpages */,
);
name = "Install Manpages";
runOnlyForDeploymentPostprocessing = 1;
};
E934A9F023E996CC0088F26F /* CopyFiles */ = {
isa = PBXCopyFilesBuildPhase;
buildActionMask = 8;
dstPath = /System/Library/FeatureFlags/Domain;
dstSubfolderSpec = 0;
files = (
E934A9F123E996D00088F26F /* objc4.plist in CopyFiles */,
);
runOnlyForDeploymentPostprocessing = 1;
};
/* End PBXCopyFilesBuildPhase section */
/* Begin PBXFileReference section */
393CEABF0DC69E3E000B69DE /* objc-references.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-references.mm"; path = "runtime/objc-references.mm"; sourceTree = "<group>"; };
393CEAC50DC69E67000B69DE /* objc-references.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-references.h"; path = "runtime/objc-references.h"; sourceTree = "<group>"; };
@ -164,6 +216,15 @@
6EACB841232C97A400CE9176 /* objc-zalloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-zalloc.h"; path = "runtime/objc-zalloc.h"; sourceTree = "<group>"; };
6EACB843232C97B900CE9176 /* objc-zalloc.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-zalloc.mm"; path = "runtime/objc-zalloc.mm"; sourceTree = "<group>"; };
6ECD0B1E2244999E00910D88 /* llvm-DenseSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "llvm-DenseSet.h"; path = "runtime/llvm-DenseSet.h"; sourceTree = "<group>"; };
6EF877D72325D62600963DBB /* objcdt */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = objcdt; sourceTree = BUILT_PRODUCTS_DIR; };
6EF877D92325D62600963DBB /* objcdt.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = objcdt.mm; sourceTree = "<group>"; usesTabs = 0; };
6EF877DF2325D92E00963DBB /* CoreSymbolication.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreSymbolication.framework; path = System/Library/PrivateFrameworks/CoreSymbolication.framework; sourceTree = SDKROOT; };
6EF877E12325D93200963DBB /* Symbolication.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Symbolication.framework; path = System/Library/PrivateFrameworks/Symbolication.framework; sourceTree = SDKROOT; };
6EF877E32325D95300963DBB /* objcdt-entitlements.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "objcdt-entitlements.plist"; sourceTree = "<group>"; };
6EF877E42325FAC400963DBB /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; };
6EF877E62326184000963DBB /* json.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = json.h; sourceTree = "<group>"; usesTabs = 1; };
6EF877E72326184000963DBB /* json.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = json.mm; sourceTree = "<group>"; usesTabs = 1; };
6EF877EA232633CC00963DBB /* objcdt.1 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.man; path = objcdt.1; sourceTree = "<group>"; };
7213C36221FA7C730090A271 /* NSObject-internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "NSObject-internal.h"; path = "runtime/NSObject-internal.h"; sourceTree = "<group>"; };
7593EC57202248DF0046AB96 /* objc-object.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-object.h"; path = "runtime/objc-object.h"; sourceTree = "<group>"; };
75A9504E202BAA0300D7D56F /* objc-locks-new.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-locks-new.h"; path = "runtime/objc-locks-new.h"; sourceTree = "<group>"; };
@ -252,15 +313,32 @@
87BB4E900EC39633005D08E1 /* objc-probes.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; name = "objc-probes.d"; path = "runtime/objc-probes.d"; sourceTree = "<group>"; };
9672F7ED14D5F488007CEC96 /* NSObject.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = NSObject.mm; path = runtime/NSObject.mm; sourceTree = "<group>"; };
BC8B5D1212D3D48100C78A5B /* libauto.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libauto.dylib; path = /usr/lib/libauto.dylib; sourceTree = "<absolute>"; };
C217B55222DE556D004369BA /* objc-env.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-env.h"; path = "runtime/objc-env.h"; sourceTree = "<group>"; };
C2296C682457336C003FAE61 /* objc-bp-assist.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-bp-assist.h"; path = "runtime/objc-bp-assist.h"; sourceTree = "<group>"; };
C22F5207230EF38B001BFE14 /* objc-ptrauth.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-ptrauth.h"; path = "runtime/objc-ptrauth.h"; sourceTree = "<group>"; };
C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DenseMapExtras.h; path = runtime/DenseMapExtras.h; sourceTree = "<group>"; };
C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "dummy-library-mac-i386.c"; path = "runtime/dummy-library-mac-i386.c"; sourceTree = "<group>"; };
D2AAC0630554660B00DB518D /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
E8923D9C116AB2820071B552 /* objc-blocktramps-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-i386.s"; path = "runtime/objc-blocktramps-i386.s"; sourceTree = "<group>"; };
E8923D9D116AB2820071B552 /* objc-blocktramps-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-x86_64.s"; path = "runtime/objc-blocktramps-x86_64.s"; sourceTree = "<group>"; };
E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-block-trampolines.mm"; path = "runtime/objc-block-trampolines.mm"; sourceTree = "<group>"; };
E934A9EF23E9967D0088F26F /* objc4.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = objc4.plist; sourceTree = "<group>"; };
E97047552497CC5300781D29 /* check_preopt_caches.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = check_preopt_caches.entitlements; sourceTree = "<group>"; };
E9AD465924925261002AF1DB /* check_preopt_caches.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = check_preopt_caches.mm; sourceTree = "<group>"; };
F9BCC727205C68E800DD9AFC /* libobjc-trampolines.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = "libobjc-trampolines.dylib"; sourceTree = BUILT_PRODUCTS_DIR; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
6EF877D42325D62600963DBB /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
6EF877E22325D93200963DBB /* Symbolication.framework in Frameworks */,
6EF877E52325FAC400963DBB /* Foundation.framework in Frameworks */,
6EF877E02325D92E00963DBB /* CoreSymbolication.framework in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
D289988505E68E00004EDB86 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
@ -289,6 +367,8 @@
838486270D6D690F00CEA253 /* Obsolete Source */,
08FB7795FE84155DC02AAC07 /* Source */,
838485B20D6D67F900CEA253 /* Other */,
6EF877D82325D62600963DBB /* objcdt */,
E9AD465824925261002AF1DB /* check-preopt-caches */,
1AB674ADFE9D54B511CA2CBB /* Products */,
F9BCC72A205C6A1600DD9AFC /* Frameworks */,
);
@ -298,6 +378,7 @@
08FB7795FE84155DC02AAC07 /* Source */ = {
isa = PBXGroup;
children = (
C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */,
838485B80D6D687300CEA253 /* hashtable2.mm */,
838485BC0D6D687300CEA253 /* maptable.mm */,
9672F7ED14D5F488007CEC96 /* NSObject.mm */,
@ -352,10 +433,23 @@
children = (
D2AAC0630554660B00DB518D /* libobjc.A.dylib */,
F9BCC727205C68E800DD9AFC /* libobjc-trampolines.dylib */,
6EF877D72325D62600963DBB /* objcdt */,
);
name = Products;
sourceTree = "<group>";
};
6EF877D82325D62600963DBB /* objcdt */ = {
isa = PBXGroup;
children = (
6EF877EA232633CC00963DBB /* objcdt.1 */,
6EF877E62326184000963DBB /* json.h */,
6EF877E72326184000963DBB /* json.mm */,
6EF877D92325D62600963DBB /* objcdt.mm */,
6EF877E32325D95300963DBB /* objcdt-entitlements.plist */,
);
path = objcdt;
sourceTree = "<group>";
};
838485B20D6D67F900CEA253 /* Other */ = {
isa = PBXGroup;
children = (
@ -363,6 +457,7 @@
838485B40D6D683300CEA253 /* APPLE_LICENSE */,
838485B50D6D683300CEA253 /* ReleaseNotes.rtf */,
83CE671D1E6E76B60095A33E /* interposable.txt */,
E934A9EF23E9967D0088F26F /* objc4.plist */,
838485B30D6D682B00CEA253 /* libobjc.order */,
);
name = Other;
@ -389,12 +484,13 @@
838485C70D6D688200CEA253 /* Private Headers */ = {
isa = PBXGroup;
children = (
7213C36221FA7C730090A271 /* NSObject-internal.h */,
83112ED30F00599600A5FBAF /* objc-internal.h */,
834EC0A311614167009B2563 /* objc-abi.h */,
838485BB0D6D687300CEA253 /* maptable.h */,
834266D70E665A8B002E4DA2 /* objc-gdb.h */,
7213C36221FA7C730090A271 /* NSObject-internal.h */,
834EC0A311614167009B2563 /* objc-abi.h */,
8306440620D24A3E00E356D2 /* objc-block-trampolines.h */,
834266D70E665A8B002E4DA2 /* objc-gdb.h */,
83112ED30F00599600A5FBAF /* objc-internal.h */,
C22F5207230EF38B001BFE14 /* objc-ptrauth.h */,
);
name = "Private Headers";
sourceTree = "<group>";
@ -437,6 +533,8 @@
83D9269721225A7400299F69 /* arm64-asm.h */,
83D92695212254CF00299F69 /* isa.h */,
838485CF0D6D68A200CEA253 /* objc-config.h */,
C2296C682457336C003FAE61 /* objc-bp-assist.h */,
C217B55222DE556D004369BA /* objc-env.h */,
83BE02E50FCCB24D00661494 /* objc-file-old.h */,
83BE02E60FCCB24D00661494 /* objc-file.h */,
838485D40D6D68A200CEA253 /* objc-initialize.h */,
@ -457,9 +555,21 @@
name = "Project Headers";
sourceTree = "<group>";
};
E9AD465824925261002AF1DB /* check-preopt-caches */ = {
isa = PBXGroup;
children = (
E97047552497CC5300781D29 /* check_preopt_caches.entitlements */,
E9AD465924925261002AF1DB /* check_preopt_caches.mm */,
);
path = "check-preopt-caches";
sourceTree = "<group>";
};
F9BCC72A205C6A1600DD9AFC /* Frameworks */ = {
isa = PBXGroup;
children = (
6EF877E42325FAC400963DBB /* Foundation.framework */,
6EF877E12325D93200963DBB /* Symbolication.framework */,
6EF877DF2325D92E00963DBB /* CoreSymbolication.framework */,
);
name = Frameworks;
sourceTree = "<group>";
@ -528,6 +638,7 @@
838486200D6D68A800CEA253 /* runtime.h in Headers */,
39ABD72312F0B61800D1054C /* objc-weak.h in Headers */,
83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */,
C22F5208230EF38B001BFE14 /* objc-ptrauth.h in Headers */,
6ECD0B1F2244999E00910D88 /* llvm-DenseSet.h in Headers */,
83F4B52915E843B100E0926F /* NSObject.h in Headers */,
);
@ -536,6 +647,23 @@
/* End PBXHeadersBuildPhase section */
/* Begin PBXNativeTarget section */
6EF877D62325D62600963DBB /* objcdt */ = {
isa = PBXNativeTarget;
buildConfigurationList = 6EF877DD2325D62600963DBB /* Build configuration list for PBXNativeTarget "objcdt" */;
buildPhases = (
6EF877D32325D62600963DBB /* Sources */,
6EF877D42325D62600963DBB /* Frameworks */,
6EF877D52325D62600963DBB /* Install Manpages */,
);
buildRules = (
);
dependencies = (
);
name = objcdt;
productName = objcdt;
productReference = 6EF877D72325D62600963DBB /* objcdt */;
productType = "com.apple.product-type.tool";
};
D2AAC0620554660B00DB518D /* objc */ = {
isa = PBXNativeTarget;
buildConfigurationList = 1DEB914A08733D8E0010E9CD /* Build configuration list for PBXNativeTarget "objc" */;
@ -545,6 +673,7 @@
D289988505E68E00004EDB86 /* Frameworks */,
830F2AB60D739AB600392440 /* Run Script (markgc) */,
830F2AFA0D73BC5800392440 /* Run Script (symlink) */,
E934A9F023E996CC0088F26F /* CopyFiles */,
);
buildRules = (
);
@ -579,9 +708,15 @@
08FB7793FE84155DC02AAC07 /* Project object */ = {
isa = PBXProject;
attributes = {
BuildIndependentTargetsInParallel = NO;
LastUpgradeCheck = 0440;
TargetAttributes = {
6EF877D62325D62600963DBB = {
CreatedOnToolsVersion = 11.0;
};
6EF877EF23263D7000963DBB = {
CreatedOnToolsVersion = 11.0;
ProvisioningStyle = Automatic;
};
834F9B01212E560100F95A54 = {
CreatedOnToolsVersion = 10.0;
DevelopmentTeam = 59GAB85EFG;
@ -610,6 +745,8 @@
837F67A81A771F63004D34FA /* objc-simulator */,
F9BCC6CA205C68E800DD9AFC /* objc-trampolines */,
834F9B01212E560100F95A54 /* objc4_tests */,
6EF877EF23263D7000963DBB /* objc_executables */,
6EF877D62325D62600963DBB /* objcdt */,
);
};
/* End PBXProject section */
@ -665,6 +802,16 @@
/* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
6EF877D32325D62600963DBB /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
6EF877E82326184000963DBB /* json.mm in Sources */,
6EF877DA2325D62600963DBB /* objcdt.mm in Sources */,
6EF877DE2325D79000963DBB /* objc-probes.d in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
D2AAC0610554660B00DB518D /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
@ -705,6 +852,7 @@
83B1A8BE0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s in Sources */,
83EB007B121C9EC200B92C16 /* objc-sel-table.s in Sources */,
39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */,
C2EB731D23D8A38A0040672B /* dummy-library-mac-i386.c in Sources */,
83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */,
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */,
83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */,
@ -729,6 +877,11 @@
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
6EF877F423263D8000963DBB /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = 6EF877D62325D62600963DBB /* objcdt */;
targetProxy = 6EF877F323263D8000963DBB /* PBXContainerItemProxy */;
};
837F67AD1A771F6E004D34FA /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = D2AAC0620554660B00DB518D /* objc */;
@ -751,6 +904,8 @@
COPY_PHASE_STRIP = NO;
DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
EXCLUDED_SOURCE_FILE_NAMES = "dummy-library-mac-i386.c";
"EXCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "*";
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
GCC_OPTIMIZATION_LEVEL = 0;
@ -762,6 +917,7 @@
"$(CONFIGURATION_BUILD_DIR)/usr/local/include/**",
/System/Library/Frameworks/System.framework/PrivateHeaders,
);
"INCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "dummy-library-mac-i386.c";
INSTALL_PATH = /usr/lib;
IS_ZIPPERED = YES;
LLVM_LTO = NO;
@ -787,6 +943,10 @@
"-interposable_list",
"-Xlinker",
interposable.txt,
"-Xlinker",
"-headerpad",
"-Xlinker",
0x100,
);
"OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = (
"-lc++abi",
@ -810,7 +970,9 @@
"-interposable_list",
"-Xlinker",
interposable.txt,
"-loah",
);
"OTHER_LDFLAGS[sdk=macosx*][arch=i386]" = "-nodefaultlibs";
OTHER_TAPI_FLAGS = "-exclude-public-header $(DSTROOT)/usr/include/objc/ObjectiveC.apinotes -exclude-public-header $(DSTROOT)/usr/include/objc/module.modulemap -Xparser -Wno-deprecated-declarations -Xparser -Wno-unavailable-declarations -Xparser -D_OBJC_PRIVATE_H_=1 -DOBJC_DECLARE_SYMBOLS=1";
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = objc.A;
@ -829,6 +991,8 @@
"COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX";
DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
EXCLUDED_SOURCE_FILE_NAMES = "dummy-library-mac-i386.c";
"EXCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "*";
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO;
@ -839,6 +1003,7 @@
"$(CONFIGURATION_BUILD_DIR)/usr/local/include/**",
/System/Library/Frameworks/System.framework/PrivateHeaders,
);
"INCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "dummy-library-mac-i386.c";
INSTALL_PATH = /usr/lib;
IS_ZIPPERED = YES;
ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order";
@ -863,6 +1028,10 @@
"-interposable_list",
"-Xlinker",
interposable.txt,
"-Xlinker",
"-headerpad",
"-Xlinker",
0x100,
);
"OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = (
"-lc++abi",
@ -886,7 +1055,9 @@
"-interposable_list",
"-Xlinker",
interposable.txt,
"-loah",
);
"OTHER_LDFLAGS[sdk=macosx*][arch=i386]" = "-nodefaultlibs";
OTHER_TAPI_FLAGS = "-exclude-public-header $(DSTROOT)/usr/include/objc/ObjectiveC.apinotes -exclude-public-header $(DSTROOT)/usr/include/objc/module.modulemap -Xparser -Wno-deprecated-declarations -Xparser -Wno-unavailable-declarations -Xparser -D_OBJC_PRIVATE_H_=1 -DOBJC_DECLARE_SYMBOLS=1";
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = objc.A;
@ -908,6 +1079,7 @@
CLANG_CXX_LIBRARY = "libc++";
CLANG_LINK_OBJC_RUNTIME = NO;
CLANG_OBJC_RUNTIME = NO;
CODE_SIGN_IDENTITY = "-";
DEBUG_INFORMATION_FORMAT = dwarf;
GCC_ENABLE_CPP_EXCEPTIONS = NO;
GCC_ENABLE_CPP_RTTI = NO;
@ -954,6 +1126,7 @@
CLANG_CXX_LIBRARY = "libc++";
CLANG_LINK_OBJC_RUNTIME = NO;
CLANG_OBJC_RUNTIME = NO;
CODE_SIGN_IDENTITY = "-";
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_ENABLE_CPP_EXCEPTIONS = NO;
GCC_ENABLE_CPP_RTTI = NO;
@ -995,6 +1168,59 @@
};
name = Release;
};
6EF877DB2325D62600963DBB /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_ENTITLEMENTS = "objcdt/objcdt-entitlements.plist";
CODE_SIGN_IDENTITY = "-";
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_PREPROCESSOR_DEFINITIONS = (
"__BUILDING_OBJCDT__=1",
"$(inherited)",
);
HEADER_SEARCH_PATHS = (
"$(SRCROOT)/runtime",
/System/Library/Frameworks/System.framework/PrivateHeaders,
);
PRODUCT_NAME = "$(TARGET_NAME)";
SYSTEM_FRAMEWORK_SEARCH_PATHS = "$(inherited) $(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks";
};
name = Debug;
};
6EF877DC2325D62600963DBB /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_ENTITLEMENTS = "objcdt/objcdt-entitlements.plist";
CODE_SIGN_IDENTITY = "-";
GCC_PREPROCESSOR_DEFINITIONS = (
"__BUILDING_OBJCDT__=1",
"$(inherited)",
);
HEADER_SEARCH_PATHS = (
"$(SRCROOT)/runtime",
/System/Library/Frameworks/System.framework/PrivateHeaders,
);
PRODUCT_NAME = "$(TARGET_NAME)";
SYSTEM_FRAMEWORK_SEARCH_PATHS = "$(inherited) $(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks";
};
name = Release;
};
6EF877F023263D7000963DBB /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_STYLE = Automatic;
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Debug;
};
6EF877F123263D7000963DBB /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_STYLE = Automatic;
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Release;
};
834F9B02212E560200F95A54 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
@ -1034,6 +1260,7 @@
COPY_HEADERS_UNIFDEF_FLAGS = "-UBUILD_FOR_OSX";
"COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX";
COPY_PHASE_STRIP = NO;
DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
@ -1055,6 +1282,7 @@
OTHER_LDFLAGS = (
"-Xlinker",
"-not_for_dyld_shared_cache",
"-nodefaultlibs",
);
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = "$(TARGET_NAME)";
@ -1070,6 +1298,7 @@
COPY_HEADERS_RUN_UNIFDEF = YES;
COPY_HEADERS_UNIFDEF_FLAGS = "-UBUILD_FOR_OSX";
"COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX";
DEPLOYMENT_LOCATION = YES;
DYLIB_CURRENT_VERSION = 228;
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
@ -1090,6 +1319,7 @@
OTHER_LDFLAGS = (
"-Xlinker",
"-not_for_dyld_shared_cache",
"-nodefaultlibs",
);
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = "$(TARGET_NAME)";
@ -1120,6 +1350,24 @@
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
6EF877DD2325D62600963DBB /* Build configuration list for PBXNativeTarget "objcdt" */ = {
isa = XCConfigurationList;
buildConfigurations = (
6EF877DB2325D62600963DBB /* Debug */,
6EF877DC2325D62600963DBB /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
6EF877F223263D7000963DBB /* Build configuration list for PBXAggregateTarget "objc_executables" */ = {
isa = XCConfigurationList;
buildConfigurations = (
6EF877F023263D7000963DBB /* Debug */,
6EF877F123263D7000963DBB /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
834F9B04212E560200F95A54 /* Build configuration list for PBXAggregateTarget "objc4_tests" */ = {
isa = XCConfigurationList;
buildConfigurations = (

11
objc4.plist Normal file
View File

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>preoptimizedCaches</key>
<dict>
<key>Enabled</key>
<true/>
</dict>
</dict>
</plist>

82
objcdt/json.h Normal file
View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2019 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#ifndef _OBJC_OBJCDT_JSON_H_
#define _OBJC_OBJCDT_JSON_H_
#include <cstdint>
#include <cstdbool>
#include <stdio.h>
#include <functional>
namespace json {
enum context: uint8_t {
root,
array_value,
object_value,
object_key,
done,
};
class writer {
private:
FILE *_file;
context _context;
int _depth;
bool _needs_comma;
void begin_value(int sep = '\0');
void advance(context old);
void key(const char *key);
public:
writer(FILE *f);
~writer();
void object(std::function<void()>);
void object(const char *key, std::function<void()>);
void array(std::function<void()>);
void array(const char *key, std::function<void()>);
void boolean(bool value);
void boolean(const char *key, bool value);
void number(uint64_t value);
void number(const char *key, uint64_t value);
void string(const char *s);
void string(const char *key, const char *s);
__printflike(2, 3)
void stringf(const char *fmt, ...);
__printflike(3, 4)
void stringf(const char *key, const char *fmt, ...);
};
}
#endif /* _OBJC_OBJCDT_JSON_H_ */
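
The writer above is a small push-style JSON emitter: composite values are built inside std::function callbacks so the class can track nesting context and comma placement. A minimal usage sketch (hypothetical, not part of this commit) that relies only on the header above:

#include "json.h"

int main()
{
    json::writer w(stdout);        // destructor emits a final newline and flushes
    w.object([&]{                  // {
        w.number("pid", 123);      //   "pid": 123,
        w.array("classes", [&]{    //   "classes": [
            w.string("NSObject");  //     "NSObject"
        });                        //   ]
    });                            // }
    return 0;
}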

234
objcdt/json.mm Normal file
View File

@ -0,0 +1,234 @@
/*
* Copyright (c) 2019 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <assert.h>
#include "json.h"
namespace json {
static bool
context_is_value(context c)
{
return c == root || c == array_value || c == object_value;
}
writer::writer(FILE *f)
: _file(f)
, _context(root)
, _depth(0)
, _needs_comma(false)
{
}
writer::~writer()
{
fputc('\n', _file);
fflush(_file);
}
void
writer::begin_value(int sep)
{
if (_needs_comma) {
_needs_comma = false;
if (sep) {
fprintf(_file, ", %c\n", sep);
return;
}
fputs(",\n", _file);
}
if (_context == array_value || _context == object_key) {
fprintf(_file, "%*s", _depth * 2, "");
}
if (sep) {
fprintf(_file, "%c\n", sep);
}
}
void
writer::advance(context c)
{
switch (c) {
case root:
_context = done;
_needs_comma = false;
break;
case array_value:
_context = array_value;
_needs_comma = true;
break;
case object_value:
_context = object_key;
_needs_comma = true;
break;
case object_key:
_context = object_value;
_needs_comma = false;
break;
case done:
assert(false);
break;
}
}
void
writer::key(const char *key)
{
assert(_context == object_key);
begin_value();
fprintf(_file, "\"%s\": ", key);
advance(_context);
}
void
writer::object(std::function<void()> f)
{
context old = _context;
assert(context_is_value(old));
begin_value('{');
_depth++;
_context = object_key;
_needs_comma = false;
f();
_depth--;
fprintf(_file, "\n%*s}", _depth * 2, "");
advance(old);
}
void
writer::object(const char *k, std::function<void()> f)
{
key(k);
object(f);
}
void
writer::array(std::function<void()> f)
{
context old = _context;
assert(context_is_value(old));
begin_value('[');
_depth++;
_context = array_value;
_needs_comma = false;
f();
_depth--;
fprintf(_file, "\n%*s]", _depth * 2, "");
advance(old);
}
void
writer::array(const char *k, std::function<void()> f)
{
key(k);
array(f);
}
void
writer::boolean(bool value)
{
assert(context_is_value(_context));
begin_value();
fputs(value ? "true" : "false", _file);
advance(_context);
}
void
writer::boolean(const char *k, bool value)
{
key(k);
boolean(value);
}
void
writer::number(uint64_t value)
{
assert(context_is_value(_context));
begin_value();
fprintf(_file, "%lld", value);
advance(_context);
}
void
writer::number(const char *k, uint64_t value)
{
key(k);
number(value);
}
void
writer::string(const char *s)
{
assert(context_is_value(_context));
begin_value();
fprintf(_file, "\"%s\"", s);
advance(_context);
}
void
writer::string(const char *k, const char *s)
{
key(k);
string(s);
}
void
writer::stringf(const char *fmt, ...)
{
va_list ap;
assert(context_is_value(_context));
begin_value();
fputc('"', _file);
va_start(ap, fmt);
vfprintf(_file, fmt, ap);
va_end(ap);
fputc('"', _file);
advance(_context);
}
void
writer::stringf(const char *k, const char *fmt, ...)
{
va_list ap;
key(k);
assert(context_is_value(_context));
begin_value();
fputc('"', _file);
va_start(ap, fmt);
vfprintf(_file, fmt, ap);
va_end(ap);
fputc('"', _file);
advance(_context);
}
} // json

View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>task_for_pid-allow</key>
<true/>
<key>com.apple.system-task-ports</key>
<true/>
</dict>
</plist>

19
objcdt/objcdt.1 Normal file
View File

@ -0,0 +1,19 @@
.\" Copyright (c) 2019, Apple Computer, Inc. All rights reserved.
.\"
.Dd September 9, 2019 \" DATE
.Dt objcdt 1 \" Program name and manual section number
.Os "OS X"
.Sh NAME
.Nm objcdt
.Nd Tool to debug Objective-C usage in live processes
.Sh SYNOPSIS
.Nm objcdt
.Sh DESCRIPTION
The
.Nm
utility is a small CLI with embedded help that can dump some information about
the Objective-C runtime state in live processes.
.Pp
Help can be obtained using
.Nm
.Ar help

36
objcdt/objcdt.mm Normal file
View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2019 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include "objc-private.h"
#include "objc-ptrauth.h"
#include <stdio.h>
#include <sysexits.h>
#include <getopt.h>
#include <pthread.h>
#include <sys/sysctl.h>
#include <string>
int main(int argc, const char *argv[])
{
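// Note: this open-source drop ships objcdt only as a stub; instead of
// implementing the commands described in the man page, main() exits
// immediately with EX_UNAVAILABLE (69, "service unavailable" in <sysexits.h>).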
return EX_UNAVAILABLE;
}

0
objcrt/objcrt.vcproj Normal file → Executable file
View File

0
prebuild.bat Normal file → Executable file
View File

Binary file not shown.

View File

@ -222,6 +222,40 @@ LExit$0:
.endmacro
//////////////////////////////////////////////////////////////////////
//
// SAVE_REGS
//
// Create a stack frame and save all argument registers in preparation
// for a function call.
//////////////////////////////////////////////////////////////////////
.macro SAVE_REGS
stmfd sp!, {r0-r3,r7,lr}
add r7, sp, #16
sub sp, #8 // align stack
FP_SAVE
.endmacro
//////////////////////////////////////////////////////////////////////
//
// RESTORE_REGS
//
// Restore all argument registers and pop the stack frame created by
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
.macro RESTORE_REGS
FP_RESTORE
add sp, #8 // align stack
ldmfd sp!, {r0-r3,r7,lr}
.endmacro
/////////////////////////////////////////////////////////////////////
//
// CacheLookup NORMAL|STRET <function>
@ -666,10 +700,7 @@ LNilReceiver:
.macro MethodTableLookup
stmfd sp!, {r0-r3,r7,lr}
add r7, sp, #16
sub sp, #8 // align stack
FP_SAVE
SAVE_REGS
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
@ -680,7 +711,7 @@ LNilReceiver:
mov r1, r2 // selector
.endif
mov r2, r9 // class to search
mov r3, #3 // LOOKUP_INITIALIZE | LOOKUP_INITIALIZE
mov r3, #3 // LOOKUP_INITIALIZE | LOOKUP_RESOLVER
blx _lookUpImpOrForward
mov r12, r0 // r12 = IMP
@ -690,9 +721,7 @@ LNilReceiver:
tst r12, r12 // set ne for stret forwarding
.endif
FP_RESTORE
add sp, #8 // align stack
ldmfd sp!, {r0-r3,r7,lr}
RESTORE_REGS
.endmacro
@ -819,18 +848,55 @@ LNilReceiver:
ENTRY _method_invoke
// See if this is a small method.
lsls r12, r1, #31
bne.w L_method_invoke_small
// We can directly load the IMP from big methods.
// r1 is method triplet instead of SEL
ldr r12, [r1, #METHOD_IMP]
ldr r1, [r1, #METHOD_NAME]
bx r12
L_method_invoke_small:
// Small methods require a call to handle swizzling.
SAVE_REGS
mov r0, r1
bl __method_getImplementationAndName
mov r12, r0
mov r9, r1
RESTORE_REGS
mov r1, r9
bx r12
END_ENTRY _method_invoke
ENTRY _method_invoke_stret
// See if this is a small method.
lsls r12, r2, #31
bne.w L_method_invoke_stret_small
// We can directly load the IMP from big methods.
// r2 is method triplet instead of SEL
ldr r12, [r2, #METHOD_IMP]
ldr r2, [r2, #METHOD_NAME]
bx r12
L_method_invoke_stret_small:
// Small methods require a call to handle swizzling.
SAVE_REGS
mov r0, r2
bl __method_getImplementationAndName
mov r12, r0
mov r9, r1
RESTORE_REGS
mov r2, r9
bx r12
END_ENTRY _method_invoke_stret

507
runtime/Messengers.subproj/objc-msg-arm64.S Normal file → Executable file
View File

@ -30,8 +30,19 @@
#include <arm/arch.h>
#include "isa.h"
#include "arm64-asm.h"
#include "objc-config.h"
#include "arm64-asm.h"
#if TARGET_OS_IPHONE && __LP64__
.section __TEXT,__objc_methname,cstring_literals
l_MagicSelector: /* the shared cache builder knows about this value */
.byte 0xf0, 0x9f, 0xa4, 0xaf, 0
.section __DATA,__objc_selrefs,literal_pointers,no_dead_strip
.p2align 3
_MagicSelRef:
.quad l_MagicSelector
#endif
.data
@ -57,7 +68,6 @@
_objc_restartableRanges:
RestartableEntry _cache_getImp
RestartableEntry _objc_msgSend
RestartableEntry _objc_msgSendSuper
RestartableEntry _objc_msgSendSuper2
RestartableEntry _objc_msgLookup
RestartableEntry _objc_msgLookupSuper2
@ -81,13 +91,13 @@ _objc_restartableRanges:
/********************************************************************
* GetClassFromIsa_p16 src
* GetClassFromIsa_p16 src, needs_auth, auth_address
* src is a raw isa field. Sets p16 to the corresponding class pointer.
* The raw isa might be an indexed isa to be decoded, or a
* packed isa that needs to be masked.
*
* On exit:
* $0 is unchanged
* src is unchanged
* p16 is a class pointer
* x10 is clobbered
********************************************************************/
@ -99,11 +109,11 @@ _objc_indexed_classes:
.fill ISA_INDEX_COUNT, PTRSIZE, 0
#endif
.macro GetClassFromIsa_p16 /* src */
.macro GetClassFromIsa_p16 src, needs_auth, auth_address /* note: auth_address is not required if !needs_auth */
#if SUPPORT_INDEXED_ISA
// Indexed isa
mov p16, $0 // optimistically set dst = src
mov p16, \src // optimistically set dst = src
tbz p16, #ISA_INDEX_IS_NPI_BIT, 1f // done if not non-pointer isa
// isa in p16 is indexed
adrp x10, _objc_indexed_classes@PAGE
@ -113,12 +123,15 @@ _objc_indexed_classes:
1:
#elif __LP64__
.if \needs_auth == 0 // _cache_getImp takes an authed class already
mov p16, \src
.else
// 64-bit packed isa
and p16, $0, #ISA_MASK
ExtractISA p16, \src, \auth_address
.endif
#else
// 32-bit raw isa
mov p16, $0
mov p16, \src
#endif
@ -169,9 +182,85 @@ LExit$0:
#define FrameWithNoSaves 0x04000000 // frame, no non-volatile saves
#define MSGSEND 100
#define METHOD_INVOKE 101
//////////////////////////////////////////////////////////////////////
//
// SAVE_REGS
//
// Create a stack frame and save all argument registers in preparation
// for a function call.
//////////////////////////////////////////////////////////////////////
.macro SAVE_REGS kind
// push frame
SignLR
stp fp, lr, [sp, #-16]!
mov fp, sp
// save parameter registers: x0..x8, q0..q7
sub sp, sp, #(10*8 + 8*16)
stp q0, q1, [sp, #(0*16)]
stp q2, q3, [sp, #(2*16)]
stp q4, q5, [sp, #(4*16)]
stp q6, q7, [sp, #(6*16)]
stp x0, x1, [sp, #(8*16+0*8)]
stp x2, x3, [sp, #(8*16+2*8)]
stp x4, x5, [sp, #(8*16+4*8)]
stp x6, x7, [sp, #(8*16+6*8)]
.if \kind == MSGSEND
stp x8, x15, [sp, #(8*16+8*8)]
mov x16, x15 // stashed by CacheLookup, restore to x16
.elseif \kind == METHOD_INVOKE
str x8, [sp, #(8*16+8*8)]
.else
.abort Unknown kind.
.endif
.endmacro
//////////////////////////////////////////////////////////////////////
//
// RESTORE_REGS
//
// Restore all argument registers and pop the stack frame created by
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
.macro RESTORE_REGS kind
ldp q0, q1, [sp, #(0*16)]
ldp q2, q3, [sp, #(2*16)]
ldp q4, q5, [sp, #(4*16)]
ldp q6, q7, [sp, #(6*16)]
ldp x0, x1, [sp, #(8*16+0*8)]
ldp x2, x3, [sp, #(8*16+2*8)]
ldp x4, x5, [sp, #(8*16+4*8)]
ldp x6, x7, [sp, #(8*16+6*8)]
.if \kind == MSGSEND
ldp x8, x16, [sp, #(8*16+8*8)]
orr x16, x16, #2 // for the sake of instrumentation, remember it was the slowpath
.elseif \kind == METHOD_INVOKE
ldr x8, [sp, #(8*16+8*8)]
.else
.abort Unknown kind.
.endif
mov sp, fp
ldp fp, lr, [sp], #16
AuthenticateLR
.endmacro
/********************************************************************
*
* CacheLookup NORMAL|GETIMP|LOOKUP <function>
* CacheLookup NORMAL|GETIMP|LOOKUP <function> MissLabelDynamic MissLabelConstant
*
* MissLabelConstant is only used for the GETIMP variant.
*
* Locate the implementation for a selector in a class method cache.
*
@ -185,11 +274,27 @@ LExit$0:
* x16 = class to be searched
*
* Kills:
* x9,x10,x11,x12, x17
* x9,x10,x11,x12,x13,x15,x17
*
* Untouched:
* x14
*
* On exit: (found) calls or returns IMP
* with x16 = class, x17 = IMP
* In LOOKUP mode, the two low bits are set to 0x3
* if we hit a constant cache (used in objc_trace)
* (not found) jumps to LCacheMiss
* with x15 = class
* For constant caches in LOOKUP mode, the low bit
of x16 is set to 0x1 to indicate we had to fall back.
* In addition, when LCacheMiss is __objc_msgSend_uncached or
* __objc_msgLookup_uncached, 0x2 will be set in x16
* to remember we took the slowpath.
* So the two low bits of x16 on exit mean:
* 0: dynamic hit
* 1: fallback to the parent class, when there is a preoptimized cache
* 2: slowpath
* 3: preoptimized cache hit
*
********************************************************************/
@ -197,60 +302,37 @@ LExit$0:
#define GETIMP 1
#define LOOKUP 2
// CacheHit: x17 = cached IMP, x12 = address of cached IMP, x1 = SEL, x16 = isa
// CacheHit: x17 = cached IMP, x10 = address of buckets, x1 = SEL, x16 = isa
.macro CacheHit
.if $0 == NORMAL
TailCallCachedImp x17, x12, x1, x16 // authenticate and call imp
TailCallCachedImp x17, x10, x1, x16 // authenticate and call imp
.elseif $0 == GETIMP
mov p0, p17
cbz p0, 9f // don't ptrauth a nil imp
AuthAndResignAsIMP x0, x12, x1, x16 // authenticate imp and re-sign as IMP
AuthAndResignAsIMP x0, x10, x1, x16 // authenticate imp and re-sign as IMP
9: ret // return IMP
.elseif $0 == LOOKUP
// No nil check for ptrauth: the caller would crash anyway when they
// jump to a nil IMP. We don't care if that jump also fails ptrauth.
AuthAndResignAsIMP x17, x12, x1, x16 // authenticate imp and re-sign as IMP
AuthAndResignAsIMP x17, x10, x1, x16 // authenticate imp and re-sign as IMP
cmp x16, x15
cinc x16, x16, ne // x16 += 1 when x15 != x16 (for instrumentation; fallback to the parent class)
ret // return imp via x17
.else
.abort oops
.endif
.endmacro
.macro CheckMiss
// miss if bucket->sel == 0
.if $0 == GETIMP
cbz p9, LGetImpMiss
.elseif $0 == NORMAL
cbz p9, __objc_msgSend_uncached
.elseif $0 == LOOKUP
cbz p9, __objc_msgLookup_uncached
.else
.abort oops
.endif
.endmacro
.macro JumpMiss
.if $0 == GETIMP
b LGetImpMiss
.elseif $0 == NORMAL
b __objc_msgSend_uncached
.elseif $0 == LOOKUP
b __objc_msgLookup_uncached
.else
.abort oops
.endif
.endmacro
.macro CacheLookup
.macro CacheLookup Mode, Function, MissLabelDynamic, MissLabelConstant
//
// Restart protocol:
//
// As soon as we're past the LLookupStart$1 label we may have loaded
// an invalid cache pointer or mask.
// As soon as we're past the LLookupStart\Function label we may have
// loaded an invalid cache pointer or mask.
//
// When task_restartable_ranges_synchronize() is called,
// (or when a signal hits us) before we're past LLookupEnd$1,
// then our PC will be reset to LLookupRecover$1 which forcefully
// (or when a signal hits us) before we're past LLookupEnd\Function,
// then our PC will be reset to LLookupRecover\Function which forcefully
// jumps to the cache-miss codepath which has the following
// requirements:
//
@ -263,15 +345,33 @@ LExit$0:
// - x16 contains the isa
// - other registers are set as per calling conventions
//
LLookupStart$1:
mov x15, x16 // stash the original isa
LLookupStart\Function:
// p1 = SEL, p16 = isa
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
ldr p10, [x16, #CACHE] // p10 = mask|buckets
lsr p11, p10, #48 // p11 = mask
and p10, p10, #0xffffffffffff // p10 = buckets
and w12, w1, w11 // x12 = _cmd & mask
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
ldr p11, [x16, #CACHE] // p11 = mask|buckets
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
#if CONFIG_USE_PREOPT_CACHES
#if __has_feature(ptrauth_calls)
tbnz p11, #0, LLookupPreopt\Function
and p10, p11, #0x0000ffffffffffff // p10 = buckets
#else
and p10, p11, #0x0000fffffffffffe // p10 = buckets
tbnz p11, #0, LLookupPreopt\Function
#endif
eor p12, p1, p1, LSR #7
and p12, p12, p11, LSR #48 // x12 = (_cmd ^ (_cmd >> 7)) & mask
#else
and p10, p11, #0x0000ffffffffffff // p10 = buckets
and p12, p1, p11, LSR #48 // x12 = _cmd & mask
#endif // CONFIG_USE_PREOPT_CACHES
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
ldr p11, [x16, #CACHE] // p11 = mask|buckets
and p10, p11, #~0xf // p10 = buckets
and p11, p11, #0xf // p11 = maskShift
mov p12, #0xffff
@ -281,52 +381,122 @@ LLookupStart$1:
#error Unsupported cache mask storage for ARM64.
#endif
add p13, p10, p12, LSL #(1+PTRSHIFT)
// p13 = buckets + ((_cmd & mask) << (1+PTRSHIFT))
add p12, p10, p12, LSL #(1+PTRSHIFT)
// p12 = buckets + ((_cmd & mask) << (1+PTRSHIFT))
// do {
1: ldp p17, p9, [x13], #-BUCKET_SIZE // {imp, sel} = *bucket--
cmp p9, p1 // if (sel != _cmd) {
b.ne 3f // scan more
// } else {
2: CacheHit \Mode // hit: call or return imp
// }
3: cbz p9, \MissLabelDynamic // if (sel == 0) goto Miss;
cmp p13, p10 // } while (bucket >= buckets)
b.hs 1b
ldp p17, p9, [x12] // {imp, sel} = *bucket
1: cmp p9, p1 // if (bucket->sel != _cmd)
b.ne 2f // scan more
CacheHit $0 // call or return imp
// wrap-around:
// p10 = first bucket
// p11 = mask (and maybe other bits on LP64)
// p12 = _cmd & mask
//
// A full cache can happen with CACHE_ALLOW_FULL_UTILIZATION.
// So stop when we circle back to the first probed bucket
// rather than when hitting the first bucket again.
//
// Note that we might probe the initial bucket twice
// when the first probed slot is the last entry.
2: // not hit: p12 = not-hit bucket
CheckMiss $0 // miss if bucket->sel == 0
cmp p12, p10 // wrap if bucket == buckets
b.eq 3f
ldp p17, p9, [x12, #-BUCKET_SIZE]! // {imp, sel} = *--bucket
b 1b // loop
3: // wrap: p12 = first bucket, w11 = mask
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
add p12, p12, p11, LSR #(48 - (1+PTRSHIFT))
// p12 = buckets + (mask << 1+PTRSHIFT)
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
add p13, p10, w11, UXTW #(1+PTRSHIFT)
// p13 = buckets + (mask << 1+PTRSHIFT)
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
add p13, p10, p11, LSR #(48 - (1+PTRSHIFT))
// p13 = buckets + (mask << 1+PTRSHIFT)
// see comment about maskZeroBits
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
add p12, p12, p11, LSL #(1+PTRSHIFT)
// p12 = buckets + (mask << 1+PTRSHIFT)
add p13, p10, p11, LSL #(1+PTRSHIFT)
// p13 = buckets + (mask << 1+PTRSHIFT)
#else
#error Unsupported cache mask storage for ARM64.
#endif
add p12, p10, p12, LSL #(1+PTRSHIFT)
// p12 = first probed bucket
// Clone scanning loop to miss instead of hang when cache is corrupt.
// The slow path may detect any corruption and halt later.
// do {
4: ldp p17, p9, [x13], #-BUCKET_SIZE // {imp, sel} = *bucket--
cmp p9, p1 // if (sel == _cmd)
b.eq 2b // goto hit
cmp p9, #0 // } while (sel != 0 &&
ccmp p13, p12, #0, ne // bucket > first_probed)
b.hi 4b
ldp p17, p9, [x12] // {imp, sel} = *bucket
1: cmp p9, p1 // if (bucket->sel != _cmd)
b.ne 2f // scan more
CacheHit $0 // call or return imp
LLookupEnd\Function:
LLookupRecover\Function:
b \MissLabelDynamic
2: // not hit: p12 = not-hit bucket
CheckMiss $0 // miss if bucket->sel == 0
cmp p12, p10 // wrap if bucket == buckets
b.eq 3f
ldp p17, p9, [x12, #-BUCKET_SIZE]! // {imp, sel} = *--bucket
b 1b // loop
#if CONFIG_USE_PREOPT_CACHES
#if CACHE_MASK_STORAGE != CACHE_MASK_STORAGE_HIGH_16
#error config unsupported
#endif
LLookupPreopt\Function:
#if __has_feature(ptrauth_calls)
and p10, p11, #0x007ffffffffffffe // p10 = buckets
autdb x10, x16 // auth as early as possible
#endif
LLookupEnd$1:
LLookupRecover$1:
3: // double wrap
JumpMiss $0
// x12 = (_cmd - first_shared_cache_sel)
adrp x9, _MagicSelRef@PAGE
ldr p9, [x9, _MagicSelRef@PAGEOFF]
sub p12, p1, p9
// w9 = ((_cmd - first_shared_cache_sel) >> hash_shift & hash_mask)
#if __has_feature(ptrauth_calls)
// bits 63..60 of x11 are the number of bits in hash_mask
// bits 59..55 of x11 is hash_shift
lsr x17, x11, #55 // w17 = (hash_shift, ...)
lsr w9, w12, w17 // >>= shift
lsr x17, x11, #60 // w17 = mask_bits
mov x11, #0x7fff
lsr x11, x11, x17 // p11 = mask (0x7fff >> mask_bits)
and x9, x9, x11 // &= mask
#else
// bits 63..53 of x11 is hash_mask
// bits 52..48 of x11 is hash_shift
lsr x17, x11, #48 // w17 = (hash_shift, hash_mask)
lsr w9, w12, w17 // >>= shift
and x9, x9, x11, LSR #53 // &= mask
#endif
ldr x17, [x10, x9, LSL #3] // x17 == sel_offs | (imp_offs << 32)
cmp x12, w17, uxtw
.if \Mode == GETIMP
b.ne \MissLabelConstant // cache miss
sub x0, x16, x17, LSR #32 // imp = isa - imp_offs
SignAsImp x0
ret
.else
b.ne 5f // cache miss
sub x17, x16, x17, LSR #32 // imp = isa - imp_offs
.if \Mode == NORMAL
br x17
.elseif \Mode == LOOKUP
orr x16, x16, #3 // for instrumentation, note that we hit a constant cache
SignAsImp x17
ret
.else
.abort unhandled mode \Mode
.endif
5: ldursw x9, [x10, #-8] // offset -8 is the fallback offset
add x16, x16, x9 // compute the fallback isa
b LLookupStart\Function // lookup again with a new isa
.endif
#endif // CONFIG_USE_PREOPT_CACHES
.endmacro
@ -345,12 +515,37 @@ LLookupRecover$1:
#if SUPPORT_TAGGED_POINTERS
.data
.align 3
.globl _objc_debug_taggedpointer_classes
_objc_debug_taggedpointer_classes:
.fill 16, 8, 0
.globl _objc_debug_taggedpointer_ext_classes
_objc_debug_taggedpointer_ext_classes:
.fill 256, 8, 0
// Dispatch for split tagged pointers takes advantage of the fact that
// the extended tag classes array immediately precedes the standard
// tag array. The .alt_entry directive ensures that the two stay
// together. This is harmless when using non-split tagged pointers.
.globl _objc_debug_taggedpointer_classes
.alt_entry _objc_debug_taggedpointer_classes
_objc_debug_taggedpointer_classes:
.fill 16, 8, 0
// Look up the class for a tagged pointer in x0, placing it in x16.
.macro GetTaggedClass
and x10, x0, #0x7 // x10 = small tag
asr x11, x0, #55 // x11 = large tag with 1s filling the top (because bit 63 is 1 on a tagged pointer)
cmp x10, #7 // tag == 7?
csel x12, x11, x10, eq // x12 = index in tagged pointer classes array, negative for extended tags.
// The extended tag array is placed immediately before the basic tag array
// so this looks into the right place either way. The sign extension done
// by the asr instruction produces the value extended_tag - 256, which produces
// the correct index in the extended tagged pointer classes array.
// x16 = _objc_debug_taggedpointer_classes[x12]
adrp x10, _objc_debug_taggedpointer_classes@PAGE
add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
ldr x16, [x10, x12, LSL #3]
.endmacro
#endif
ENTRY _objc_msgSend
@ -363,30 +558,15 @@ _objc_debug_taggedpointer_ext_classes:
b.eq LReturnZero
#endif
ldr p13, [x0] // p13 = isa
GetClassFromIsa_p16 p13 // p16 = class
GetClassFromIsa_p16 p13, 1, x0 // p16 = class
LGetIsaDone:
// calls imp or objc_msgSend_uncached
CacheLookup NORMAL, _objc_msgSend
CacheLookup NORMAL, _objc_msgSend, __objc_msgSend_uncached
#if SUPPORT_TAGGED_POINTERS
LNilOrTagged:
b.eq LReturnZero // nil check
// tagged
adrp x10, _objc_debug_taggedpointer_classes@PAGE
add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
ubfx x11, x0, #60, #4
ldr x16, [x10, x11, LSL #3]
adrp x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE
add x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF
cmp x10, x16
b.ne LGetIsaDone
// ext tagged
adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE
add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF
ubfx x11, x0, #52, #8
ldr x16, [x10, x11, LSL #3]
GetTaggedClass
b LGetIsaDone
// SUPPORT_TAGGED_POINTERS
#endif
@ -412,37 +592,22 @@ LReturnZero:
b.eq LLookup_Nil
#endif
ldr p13, [x0] // p13 = isa
GetClassFromIsa_p16 p13 // p16 = class
GetClassFromIsa_p16 p13, 1, x0 // p16 = class
LLookup_GetIsaDone:
// returns imp
CacheLookup LOOKUP, _objc_msgLookup
CacheLookup LOOKUP, _objc_msgLookup, __objc_msgLookup_uncached
#if SUPPORT_TAGGED_POINTERS
LLookup_NilOrTagged:
b.eq LLookup_Nil // nil check
// tagged
adrp x10, _objc_debug_taggedpointer_classes@PAGE
add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF
ubfx x11, x0, #60, #4
ldr x16, [x10, x11, LSL #3]
adrp x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE
add x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF
cmp x10, x16
b.ne LLookup_GetIsaDone
LLookup_ExtTag:
adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE
add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF
ubfx x11, x0, #52, #8
ldr x16, [x10, x11, LSL #3]
GetTaggedClass
b LLookup_GetIsaDone
// SUPPORT_TAGGED_POINTERS
#endif
LLookup_Nil:
adrp x17, __objc_msgNil@PAGE
add x17, x17, __objc_msgNil@PAGEOFF
adr x17, __objc_msgNil
SignAsImp x17
ret
END_ENTRY _objc_msgLookup
@ -465,8 +630,7 @@ LLookup_Nil:
UNWIND _objc_msgSendSuper, NoFrame
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
// calls imp or objc_msgSend_uncached
CacheLookup NORMAL, _objc_msgSendSuper
b L_objc_msgSendSuper2_body
END_ENTRY _objc_msgSendSuper
@ -475,9 +639,18 @@ LLookup_Nil:
ENTRY _objc_msgSendSuper2
UNWIND _objc_msgSendSuper2, NoFrame
#if __has_feature(ptrauth_calls)
ldp x0, x17, [x0] // x0 = real receiver, x17 = class
add x17, x17, #SUPERCLASS // x17 = &class->superclass
ldr x16, [x17] // x16 = class->superclass
AuthISASuper x16, x17, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS
LMsgSendSuperResume:
#else
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass
CacheLookup NORMAL, _objc_msgSendSuper2
#endif
L_objc_msgSendSuper2_body:
CacheLookup NORMAL, _objc_msgSendSuper2, __objc_msgSend_uncached
END_ENTRY _objc_msgSendSuper2
@ -485,31 +658,24 @@ LLookup_Nil:
ENTRY _objc_msgLookupSuper2
UNWIND _objc_msgLookupSuper2, NoFrame
#if __has_feature(ptrauth_calls)
ldp x0, x17, [x0] // x0 = real receiver, x17 = class
add x17, x17, #SUPERCLASS // x17 = &class->superclass
ldr x16, [x17] // x16 = class->superclass
AuthISASuper x16, x17, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS
LMsgLookupSuperResume:
#else
ldp p0, p16, [x0] // p0 = real receiver, p16 = class
ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass
CacheLookup LOOKUP, _objc_msgLookupSuper2
#endif
CacheLookup LOOKUP, _objc_msgLookupSuper2, __objc_msgLookup_uncached
END_ENTRY _objc_msgLookupSuper2
.macro MethodTableLookup
// push frame
SignLR
stp fp, lr, [sp, #-16]!
mov fp, sp
// save parameter registers: x0..x8, q0..q7
sub sp, sp, #(10*8 + 8*16)
stp q0, q1, [sp, #(0*16)]
stp q2, q3, [sp, #(2*16)]
stp q4, q5, [sp, #(4*16)]
stp q6, q7, [sp, #(6*16)]
stp x0, x1, [sp, #(8*16+0*8)]
stp x2, x3, [sp, #(8*16+2*8)]
stp x4, x5, [sp, #(8*16+4*8)]
stp x6, x7, [sp, #(8*16+6*8)]
str x8, [sp, #(8*16+8*8)]
SAVE_REGS MSGSEND
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
// receiver and selector already in x0 and x1
@ -520,20 +686,7 @@ LLookup_Nil:
// IMP in x0
mov x17, x0
// restore registers and return
ldp q0, q1, [sp, #(0*16)]
ldp q2, q3, [sp, #(2*16)]
ldp q4, q5, [sp, #(4*16)]
ldp q6, q7, [sp, #(6*16)]
ldp x0, x1, [sp, #(8*16+0*8)]
ldp x2, x3, [sp, #(8*16+2*8)]
ldp x4, x5, [sp, #(8*16+4*8)]
ldp x6, x7, [sp, #(8*16+6*8)]
ldr x8, [sp, #(8*16+8*8)]
mov sp, fp
ldp fp, lr, [sp], #16
AuthenticateLR
RESTORE_REGS MSGSEND
.endmacro
@ -541,7 +694,7 @@ LLookup_Nil:
UNWIND __objc_msgSend_uncached, FrameWithNoSaves
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band p16 is the class to search
// Out-of-band p15 is the class to search
MethodTableLookup
TailCallFunctionPointer x17
@ -553,7 +706,7 @@ LLookup_Nil:
UNWIND __objc_msgLookup_uncached, FrameWithNoSaves
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band p16 is the class to search
// Out-of-band p15 is the class to search
MethodTableLookup
ret
@ -563,13 +716,17 @@ LLookup_Nil:
STATIC_ENTRY _cache_getImp
GetClassFromIsa_p16 p0
CacheLookup GETIMP, _cache_getImp
GetClassFromIsa_p16 p0, 0
CacheLookup GETIMP, _cache_getImp, LGetImpMissDynamic, LGetImpMissConstant
LGetImpMiss:
LGetImpMissDynamic:
mov p0, #0
ret
LGetImpMissConstant:
mov p0, p2
ret
END_ENTRY _cache_getImp
@ -615,11 +772,37 @@ LGetImpMiss:
ENTRY _method_invoke
// See if this is a small method.
tbnz p1, #0, L_method_invoke_small
// We can directly load the IMP from big methods.
// x1 is method triplet instead of SEL
add p16, p1, #METHOD_IMP
ldr p17, [x16]
ldr p1, [x1, #METHOD_NAME]
TailCallMethodListImp x17, x16
L_method_invoke_small:
// Small methods require a call to handle swizzling.
SAVE_REGS METHOD_INVOKE
mov p0, p1
bl __method_getImplementationAndName
// ARM64_32 packs both return values into x0, with SEL in the high bits and IMP in the low.
// ARM64 just returns them in x0 and x1.
mov x17, x0
#if __LP64__
mov x16, x1
#endif
RESTORE_REGS METHOD_INVOKE
#if __LP64__
mov x1, x16
#else
lsr x1, x17, #32
mov w17, w17
#endif
TailCallFunctionPointer x17
END_ENTRY _method_invoke
#endif
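// The packed return convention unpacked above, as a C-level sketch
// (names illustrative; the packing matches the arm64_32 comment above):
//   uint64_t packed = _method_getImplementationAndName(m);
//   SEL sel = (SEL)(uint32_t)(packed >> 32);  // lsr x1, x17, #32
//   IMP imp = (IMP)(uint32_t)packed;          // mov w17, w17
// On LP64 the pair comes back in x0/x1 and no unpacking is needed.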
View File
@ -192,6 +192,47 @@ LExit$0:
#define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves
//////////////////////////////////////////////////////////////////////
//
// SAVE_REGS
//
// Create a stack frame and save all argument registers in preparation
// for a function call.
//////////////////////////////////////////////////////////////////////
.macro SAVE_REGS
pushl %ebp
movl %esp, %ebp
subl $$(8+5*16), %esp
movdqa %xmm3, 4*16(%esp)
movdqa %xmm2, 3*16(%esp)
movdqa %xmm1, 2*16(%esp)
movdqa %xmm0, 1*16(%esp)
.endmacro
//////////////////////////////////////////////////////////////////////
//
// RESTORE_REGS
//
// Restore all argument registers and pop the stack frame created by
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
.macro RESTORE_REGS
movdqa 4*16(%esp), %xmm3
movdqa 3*16(%esp), %xmm2
movdqa 2*16(%esp), %xmm1
movdqa 1*16(%esp), %xmm0
leave
.endmacro
/////////////////////////////////////////////////////////////////////
//
// CacheLookup return-type, caller
@ -314,10 +355,7 @@ LExit$0:
/////////////////////////////////////////////////////////////////////
.macro MethodTableLookup
pushl %ebp
movl %esp, %ebp
subl $$(8+5*16), %esp
SAVE_REGS
.if $0 == NORMAL
movl self+4(%ebp), %eax
@ -327,11 +365,6 @@ LExit$0:
movl selector_stret+4(%ebp), %ecx
.endif
movdqa %xmm3, 4*16(%esp)
movdqa %xmm2, 3*16(%esp)
movdqa %xmm1, 2*16(%esp)
movdqa %xmm0, 1*16(%esp)
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
movl $$3, 12(%esp) // LOOKUP_INITIALIZE | LOOKUP_RESOLVER
movl %edx, 8(%esp) // class
@ -341,18 +374,13 @@ LExit$0:
// imp in eax
movdqa 4*16(%esp), %xmm3
movdqa 3*16(%esp), %xmm2
movdqa 2*16(%esp), %xmm1
movdqa 1*16(%esp), %xmm0
.if $0 == NORMAL
test %eax, %eax // set ne for stret forwarding
.else
cmp %eax, %eax // set eq for nonstret forwarding
.endif
leave
RESTORE_REGS
.endmacro
@ -906,23 +934,55 @@ L_forward_stret_handler:
ENTRY _method_invoke
// See if this is a small method.
testb $1, selector(%esp)
jnz L_method_invoke_small
// We can directly load the IMP from big methods.
movl selector(%esp), %ecx
movl method_name(%ecx), %edx
movl method_imp(%ecx), %eax
movl %edx, selector(%esp)
jmp *%eax
L_method_invoke_small:
// Small methods require a call to handle swizzling.
SAVE_REGS
movl selector+4(%ebp), %eax
movl %eax, 0(%esp)
call __method_getImplementationAndName
RESTORE_REGS
movl %edx, selector(%esp)
jmp *%eax
END_ENTRY _method_invoke
ENTRY _method_invoke_stret
// See if this is a small method.
testb $1, selector_stret(%esp)
jnz L_method_invoke_stret_small
// We can directly load the IMP from big methods.
movl selector_stret(%esp), %ecx
movl method_name(%ecx), %edx
movl method_imp(%ecx), %eax
movl %edx, selector_stret(%esp)
jmp *%eax
L_method_invoke_stret_small:
// Small methods require a call to handle swizzling.
SAVE_REGS
movl selector_stret+4(%ebp), %eax
movl %eax, 0(%esp)
call __method_getImplementationAndName
RESTORE_REGS
movl %edx, selector_stret(%esp)
jmp *%eax
END_ENTRY _method_invoke_stret
View File
@ -22,7 +22,7 @@
*/
#include <TargetConditionals.h>
#if __x86_64__ && TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC
#if __x86_64__ && TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST
/********************************************************************
********************************************************************
@ -93,6 +93,7 @@ _objc_restartableRanges:
#define a2b sil
#define a3 rdx
#define a3d edx
#define a3b dl
#define a4 rcx
#define a4d ecx
#define a5 r8
@ -132,6 +133,10 @@ _objc_restartableRanges:
#define GETIMP 101
#define LOOKUP 102
#define MSGSEND 200
#define METHOD_INVOKE 201
#define METHOD_INVOKE_STRET 202
/********************************************************************
*
@ -212,6 +217,88 @@ LExit$0:
#define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves
//////////////////////////////////////////////////////////////////////
//
// SAVE_REGS
//
// Create a stack frame and save all argument registers in preparation
// for a function call.
//////////////////////////////////////////////////////////////////////
.macro SAVE_REGS kind
.if \kind != MSGSEND && \kind != METHOD_INVOKE && \kind != METHOD_INVOKE_STRET
.abort Unknown kind.
.endif
push %rbp
mov %rsp, %rbp
sub $0x80, %rsp
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
push %a2
.endif
movdqa %xmm3, -0x50(%rbp)
.if \kind == MSGSEND || \kind == METHOD_INVOKE
push %a3
.endif
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
push %a5
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
.if \kind == MSGSEND
push %r10
.endif
.endmacro
//////////////////////////////////////////////////////////////////////
//
// RESTORE_REGS
//
// Restore all argument registers and pop the stack frame created by
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
.macro RESTORE_REGS kind
.if \kind == MSGSEND
pop %r10
orq $2, %r10 // for the sake of instrumentations, remember it was the slowpath
.endif
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
pop %a5
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
.if \kind == MSGSEND || \kind == METHOD_INVOKE
pop %a3
.endif
movdqa -0x40(%rbp), %xmm4
.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
pop %a2
.endif
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
pop %rax
movdqa -0x10(%rbp), %xmm7
leave
.endmacro
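// The push/pop sequences above must mirror exactly, including the slots
// conditional on \kind, and each kind pushes an even number of registers
// so 16-byte stack alignment is preserved. For example, SAVE_REGS
// METHOD_INVOKE pushes rax, a1, a3, a4, a5, a6 (no a2, no r10), so
// RESTORE_REGS METHOD_INVOKE pops a6, a5, a4, a3, a1, rax before the
// final leave.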
/////////////////////////////////////////////////////////////////////
//
// CacheLookup return-type, caller
@ -347,26 +434,7 @@ LExit$0:
.macro MethodTableLookup
push %rbp
mov %rsp, %rbp
sub $$0x80+8, %rsp // +8 for alignment
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
push %a2
movdqa %xmm3, -0x50(%rbp)
push %a3
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
push %a5
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
SAVE_REGS MSGSEND
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
@ -383,21 +451,7 @@ LExit$0:
// IMP is now in %rax
movq %rax, %r11
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
pop %a5
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
pop %a3
movdqa -0x40(%rbp), %xmm4
pop %a2
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
pop %rax
movdqa -0x10(%rbp), %xmm7
RESTORE_REGS MSGSEND
.if $0 == NORMAL
test %r11, %r11 // set ne for stret forwarding
@ -405,8 +459,6 @@ LExit$0:
cmp %r11, %r11 // set eq for nonstret forwarding
.endif
leave
.endmacro
@ -1104,19 +1156,49 @@ LCacheMiss:
ENTRY _method_invoke
// See if this is a small method.
testb $1, %a2b
jnz L_method_invoke_small
// We can directly load the IMP from big methods.
movq method_imp(%a2), %r11
movq method_name(%a2), %a2
jmp *%r11
L_method_invoke_small:
// Small methods require a call to handle swizzling.
SAVE_REGS METHOD_INVOKE
movq %a2, %a1
call __method_getImplementationAndName
movq %rdx, %a2
movq %rax, %r11
RESTORE_REGS METHOD_INVOKE
jmp *%r11
END_ENTRY _method_invoke
ENTRY _method_invoke_stret
// See if this is a small method.
testb $1, %a3b
jnz L_method_invoke_stret_small
// We can directly load the IMP from big methods.
movq method_imp(%a3), %r11
movq method_name(%a3), %a3
jmp *%r11
L_method_invoke_stret_small:
// Small methods require a call to handle swizzling.
SAVE_REGS METHOD_INVOKE_STRET
movq %a3, %a1
call __method_getImplementationAndName
movq %rdx, %a3
movq %rax, %r11
RESTORE_REGS METHOD_INVOKE_STRET
jmp *%r11
END_ENTRY _method_invoke_stret
View File
@ -22,7 +22,7 @@
*/
#include <TargetConditionals.h>
#if __x86_64__ && !(TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC)
#if __x86_64__ && !(TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST)
#include "isa.h"
@ -102,6 +102,7 @@ _objc_restartableRanges:
#define a2b sil
#define a3 rdx
#define a3d edx
#define a3b dl
#define a4 rcx
#define a4d ecx
#define a5 r8
@ -138,6 +139,10 @@ _objc_restartableRanges:
#define GETIMP 101
#define LOOKUP 102
#define MSGSEND 200
#define METHOD_INVOKE 201
#define METHOD_INVOKE_STRET 202
/********************************************************************
*
@ -218,6 +223,88 @@ LExit$0:
#define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves
//////////////////////////////////////////////////////////////////////
//
// SAVE_REGS
//
// Create a stack frame and save all argument registers in preparation
// for a function call.
//////////////////////////////////////////////////////////////////////
.macro SAVE_REGS kind
.if \kind != MSGSEND && \kind != METHOD_INVOKE && \kind != METHOD_INVOKE_STRET
.abort Unknown kind.
.endif
push %rbp
mov %rsp, %rbp
sub $0x80, %rsp
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
push %a2
.endif
movdqa %xmm3, -0x50(%rbp)
.if \kind == MSGSEND || \kind == METHOD_INVOKE
push %a3
.endif
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
push %a5
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
.if \kind == MSGSEND
push %r10
.endif
.endmacro
//////////////////////////////////////////////////////////////////////
//
// RESTORE_REGS
//
// Restore all argument registers and pop the stack frame created by
// SAVE_REGS.
//////////////////////////////////////////////////////////////////////
.macro RESTORE_REGS kind
.if \kind == MSGSEND
pop %r10
orq $2, %r10 // for the sake of instrumentations, remember it was the slowpath
.endif
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
pop %a5
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
.if \kind == MSGSEND || \kind == METHOD_INVOKE
pop %a3
.endif
movdqa -0x40(%rbp), %xmm4
.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET
pop %a2
.endif
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
pop %rax
movdqa -0x10(%rbp), %xmm7
leave
.endmacro
/////////////////////////////////////////////////////////////////////
//
// CacheLookup return-type, caller, function
@ -382,26 +469,7 @@ LLookupEnd$2:
.macro MethodTableLookup
push %rbp
mov %rsp, %rbp
sub $$0x80+8, %rsp // +8 for alignment
movdqa %xmm0, -0x80(%rbp)
push %rax // might be xmm parameter count
movdqa %xmm1, -0x70(%rbp)
push %a1
movdqa %xmm2, -0x60(%rbp)
push %a2
movdqa %xmm3, -0x50(%rbp)
push %a3
movdqa %xmm4, -0x40(%rbp)
push %a4
movdqa %xmm5, -0x30(%rbp)
push %a5
movdqa %xmm6, -0x20(%rbp)
push %a6
movdqa %xmm7, -0x10(%rbp)
SAVE_REGS MSGSEND
// lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
.if $0 == NORMAL
@ -418,21 +486,7 @@ LLookupEnd$2:
// IMP is now in %rax
movq %rax, %r11
movdqa -0x80(%rbp), %xmm0
pop %a6
movdqa -0x70(%rbp), %xmm1
pop %a5
movdqa -0x60(%rbp), %xmm2
pop %a4
movdqa -0x50(%rbp), %xmm3
pop %a3
movdqa -0x40(%rbp), %xmm4
pop %a2
movdqa -0x30(%rbp), %xmm5
pop %a1
movdqa -0x20(%rbp), %xmm6
pop %rax
movdqa -0x10(%rbp), %xmm7
RESTORE_REGS MSGSEND
.if $0 == NORMAL
test %r11, %r11 // set ne for nonstret forwarding
@ -440,8 +494,6 @@ LLookupEnd$2:
cmp %r11, %r11 // set eq for stret forwarding
.endif
leave
.endmacro
@ -1216,19 +1268,49 @@ LCacheMiss_objc_msgLookupSuper2_stret:
ENTRY _method_invoke
// See if this is a small method.
testb $1, %a2b
jnz L_method_invoke_small
// We can directly load the IMP from big methods.
movq method_imp(%a2), %r11
movq method_name(%a2), %a2
jmp *%r11
L_method_invoke_small:
// Small methods require a call to handle swizzling.
SAVE_REGS METHOD_INVOKE
movq %a2, %a1
call __method_getImplementationAndName
movq %rdx, %a2
movq %rax, %r11
RESTORE_REGS METHOD_INVOKE
jmp *%r11
END_ENTRY _method_invoke
ENTRY _method_invoke_stret
// See if this is a small method.
testb $1, %a3b
jnz L_method_invoke_stret_small
// We can directly load the IMP from big methods.
movq method_imp(%a3), %r11
movq method_name(%a3), %a3
jmp *%r11
L_method_invoke_stret_small:
// Small methods require a call to handle swizzling.
SAVE_REGS METHOD_INVOKE_STRET
movq %a3, %a1
call __method_getImplementationAndName
movq %rdx, %a3
movq %rax, %r11
RESTORE_REGS METHOD_INVOKE_STRET
jmp *%r11
END_ENTRY _method_invoke_stret
View File
@ -123,6 +123,16 @@ struct magic_t {
class AutoreleasePoolPage;
struct AutoreleasePoolPageData
{
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
struct AutoreleasePoolEntry {
uintptr_t ptr: 48;
uintptr_t count: 16;
static const uintptr_t maxCount = 65535; // 2^16 - 1
};
static_assert((AutoreleasePoolEntry){ .ptr = MACH_VM_MAX_ADDRESS }.ptr == MACH_VM_MAX_ADDRESS, "MACH_VM_MAX_ADDRESS doesn't fit into AutoreleasePoolEntry::ptr!");
#endif
magic_t const magic;
__unsafe_unretained id *next;
pthread_t const thread;
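// Packing sketch for the entry above (LP64): one pool slot holds both
// an object and a small repeat count, e.g. for obj at 0x00007f8a12345678
//   AutoreleasePoolEntry e = { .ptr = (uintptr_t)obj, .count = 3 };
// stands for four pending releases of obj (count is the extras beyond
// the first), while sizeof(e) stays equal to sizeof(id).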
View File
@ -39,6 +39,12 @@
#include <map>
#include <execinfo.h>
#include "NSObject-internal.h"
#include <os/feature_private.h>
extern "C" {
#include <os/reason_private.h>
#include <os/variant_private.h>
}
@interface NSInvocation
- (SEL)selector;
@ -51,7 +57,13 @@ OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __buil
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_begin_offset = sizeof(AutoreleasePoolPageData);
#if __OBJC2__
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = (AutoreleasePoolPageData::AutoreleasePoolEntry){ .ptr = ~(uintptr_t)0 }.ptr;
#else
OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = ~(uintptr_t)0;
#endif
OBJC_EXTERN const uint32_t objc_class_abi_version = OBJC_CLASS_ABI_VERSION_MAX;
#endif
@ -79,8 +91,42 @@ void _objc_setBadAllocHandler(id(*newHandler)(Class))
}
static id _initializeSwiftRefcountingThenCallRetain(id objc);
static void _initializeSwiftRefcountingThenCallRelease(id objc);
explicit_atomic<id(*)(id)> swiftRetain{&_initializeSwiftRefcountingThenCallRetain};
explicit_atomic<void(*)(id)> swiftRelease{&_initializeSwiftRefcountingThenCallRelease};
static void _initializeSwiftRefcounting() {
void *const token = dlopen("/usr/lib/swift/libswiftCore.dylib", RTLD_LAZY | RTLD_LOCAL);
ASSERT(token);
swiftRetain.store((id(*)(id))dlsym(token, "swift_retain"), memory_order_relaxed);
ASSERT(swiftRetain.load(memory_order_relaxed));
swiftRelease.store((void(*)(id))dlsym(token, "swift_release"), memory_order_relaxed);
ASSERT(swiftRelease.load(memory_order_relaxed));
dlclose(token);
}
static id _initializeSwiftRefcountingThenCallRetain(id objc) {
_initializeSwiftRefcounting();
return swiftRetain.load(memory_order_relaxed)(objc);
}
static void _initializeSwiftRefcountingThenCallRelease(id objc) {
_initializeSwiftRefcounting();
swiftRelease.load(memory_order_relaxed)(objc);
}
namespace objc {
extern int PageCountWarning;
}
namespace {
#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
uint32_t numFaults = 0;
#endif
// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
@ -221,6 +267,23 @@ void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
}
}
// Call out to the _setWeaklyReferenced method on obj, if implemented.
static void callSetWeaklyReferenced(id obj) {
if (!obj)
return;
Class cls = obj->getIsa();
if (slowpath(cls->hasCustomRR() && !object_isClass(obj))) {
ASSERT(((objc_class *)cls)->isInitializing() || ((objc_class *)cls)->isInitialized());
void (*setWeaklyReferenced)(id, SEL) = (void(*)(id, SEL))
class_getMethodImplementation(cls, @selector(_setWeaklyReferenced));
if ((IMP)setWeaklyReferenced != _objc_msgForward) {
(*setWeaklyReferenced)(obj, @selector(_setWeaklyReferenced));
}
}
}
//
// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
//
@ -269,7 +332,7 @@ enum CrashIfDeallocating {
DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
};
template <HaveOld haveOld, HaveNew haveNew,
CrashIfDeallocating crashIfDeallocating>
enum CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
@ -336,11 +399,11 @@ storeWeak(id *location, objc_object *newObj)
if (haveNew) {
newObj = (objc_object *)
weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
crashIfDeallocating);
crashIfDeallocating ? CrashIfDeallocating : ReturnNilIfDeallocating);
// weak_register_no_lock returns nil if weak store should be rejected
// Set is-weakly-referenced bit in refcount table.
if (newObj && !newObj->isTaggedPointer()) {
if (!newObj->isTaggedPointerOrNil()) {
newObj->setWeaklyReferenced_nolock();
}
@ -353,6 +416,12 @@ storeWeak(id *location, objc_object *newObj)
SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
// This must be called without the locks held, as it can invoke
// arbitrary code. In particular, even if _setWeaklyReferenced
// is not implemented, resolveInstanceMethod: may be, and may
// call back into the weak reference machinery.
callSetWeaklyReferenced((id)newObj);
return (id)newObj;
}
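// Sketch of the receiving side of that callout: a class with custom
// retain/release may implement the (private) selector to learn when its
// first weak reference is formed, e.g.
//   - (void)_setWeaklyReferenced {
//       // self just became weakly referenced
//   }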
@ -474,8 +543,7 @@ objc_loadWeakRetained(id *location)
retry:
// fixme std::atomic this load
obj = *location;
if (!obj) return nil;
if (obj->isTaggedPointer()) return obj;
if (obj->isTaggedPointerOrNil()) return obj;
table = &SideTables()[obj];
@ -499,9 +567,12 @@ objc_loadWeakRetained(id *location)
else {
// Slow case. We must check for +initialize and call it outside
// the lock if necessary in order to avoid deadlocks.
// Use lookUpImpOrForward so we can avoid the assert in
// class_getInstanceMethod, since we intentionally make this
// callout with the lock held.
if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
class_getMethodImplementation(cls, @selector(retainWeakReference));
lookUpImpOrForwardTryCache(obj, @selector(retainWeakReference), cls);
if ((IMP)tryRetain == _objc_msgForward) {
result = nil;
}
@ -572,9 +643,28 @@ objc_copyWeak(id *dst, id *src)
void
objc_moveWeak(id *dst, id *src)
{
objc_copyWeak(dst, src);
objc_destroyWeak(src);
id obj;
SideTable *table;
retry:
obj = *src;
if (obj == nil) {
*dst = nil;
return;
}
table = &SideTables()[obj];
table->lock();
if (*src != obj) {
table->unlock();
goto retry;
}
weak_unregister_no_lock(&table->weak_table, obj, src);
weak_register_no_lock(&table->weak_table, obj, dst, DontCheckDeallocating);
*dst = obj;
*src = nil;
table->unlock();
}
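// Behavioral sketch of the rewrite above:
//   __weak id src = obj;
//   id dst;
//   objc_moveWeak(&dst, &src);  // dst now weakly references obj, src == nil
// The move re-registers the slot under a single side-table lock, where
// the old objc_copyWeak + objc_destroyWeak pair locked and searched twice.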
@ -611,6 +701,7 @@ private:
static pthread_key_t const key = AUTORELEASE_POOL_KEY;
static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
static size_t const COUNT = SIZE / sizeof(id);
static size_t const MAX_FAULTS = 2;
// EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
// pushed and it has never contained any objects. This saves memory
@ -643,6 +734,22 @@ private:
#endif
}
void checkTooMuchAutorelease()
{
#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
bool objcModeNoFaults = DisableFaults || getpid() == 1 ||
!os_variant_has_internal_diagnostics("com.apple.obj-c");
if (!objcModeNoFaults) {
if (depth+1 >= (uint32_t)objc::PageCountWarning && numFaults < MAX_FAULTS) { //depth is 0 when first page is allocated
os_fault_with_payload(OS_REASON_LIBSYSTEM,
OS_REASON_LIBSYSTEM_CODE_FAULT,
NULL, 0, "Large Autorelease Pool", 0);
numFaults++;
}
}
#endif
}
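// Rough scale, assuming 4kB pool pages and 8-byte ids (COUNT on the
// order of 500 slots per page): a PageCountWarning of, say, 1000 pages
// would fault at roughly half a million pending autoreleases, and
// MAX_FAULTS caps the reports at two per process.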
AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
AutoreleasePoolPageData(begin(),
objc_thread_self(),
@ -650,6 +757,10 @@ private:
newParent ? 1+newParent->depth : 0,
newParent ? newParent->hiwat : 0)
{
if (objc::PageCountWarning != -1) {
checkTooMuchAutorelease();
}
if (parent) {
parent->check();
ASSERT(!parent->child);
@ -744,8 +855,49 @@ private:
{
ASSERT(!full());
unprotect();
id *ret = next; // faster than `return next-1` because of aliasing
id *ret;
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
if (!DisableAutoreleaseCoalescing || !DisableAutoreleaseCoalescingLRU) {
if (!DisableAutoreleaseCoalescingLRU) {
if (!empty() && (obj != POOL_BOUNDARY)) {
AutoreleasePoolEntry *topEntry = (AutoreleasePoolEntry *)next - 1;
for (uintptr_t offset = 0; offset < 4; offset++) {
AutoreleasePoolEntry *offsetEntry = topEntry - offset;
if (offsetEntry <= (AutoreleasePoolEntry*)begin() || *(id *)offsetEntry == POOL_BOUNDARY) {
break;
}
if (offsetEntry->ptr == (uintptr_t)obj && offsetEntry->count < AutoreleasePoolEntry::maxCount) {
if (offset > 0) {
AutoreleasePoolEntry found = *offsetEntry;
memmove(offsetEntry, offsetEntry + 1, offset * sizeof(*offsetEntry));
*topEntry = found;
}
topEntry->count++;
ret = (id *)topEntry; // need to reset ret
goto done;
}
}
}
} else {
if (!empty() && (obj != POOL_BOUNDARY)) {
AutoreleasePoolEntry *prevEntry = (AutoreleasePoolEntry *)next - 1;
if (prevEntry->ptr == (uintptr_t)obj && prevEntry->count < AutoreleasePoolEntry::maxCount) {
prevEntry->count++;
ret = (id *)prevEntry; // need to reset ret
goto done;
}
}
}
}
#endif
ret = next; // faster than `return next-1` because of aliasing
*next++ = obj;
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
// Make sure obj fits in the bits available for it
ASSERT(((AutoreleasePoolEntry *)ret)->ptr == (uintptr_t)obj);
#endif
done:
protect();
return ret;
}
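// Worked example of the LRU path above: with the newest slots reading
// [... A(1) B(0) C(0)] (top of page rightmost) and obj == A, the scan
// finds A at offset 2, memmoves B and C down one slot, reinstalls A at
// the top, and bumps its count: [... B(0) C(0) A(2)]. Counts are the
// extra releases beyond the first.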
@ -772,13 +924,29 @@ private:
}
page->unprotect();
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
AutoreleasePoolEntry* entry = (AutoreleasePoolEntry*) --page->next;
// create an obj with the zeroed out top byte and release that
id obj = (id)entry->ptr;
int count = (int)entry->count; // grab these before memset
#else
id obj = *--page->next;
#endif
memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
page->protect();
if (obj != POOL_BOUNDARY) {
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
// release count+1 times since it is count of the additional
// autoreleases beyond the first one
for (int i = 0; i < count + 1; i++) {
objc_release(obj);
}
#else
objc_release(obj);
#endif
}
}
setHotPage(this);
@ -984,10 +1152,13 @@ private:
public:
static inline id autorelease(id obj)
{
ASSERT(obj);
ASSERT(!obj->isTaggedPointer());
ASSERT(!obj->isTaggedPointerOrNil());
id *dest __unused = autoreleaseFast(obj);
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || (id)((AutoreleasePoolEntry *)dest)->ptr == obj);
#else
ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
#endif
return obj;
}
@ -1024,9 +1195,9 @@ public:
_objc_inform_now_and_on_crash
("Invalid or prematurely-freed autorelease pool %p. "
"Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
"Proceeding anyway because the app is old "
"(SDK version " SDK_FORMAT "). Memory errors are likely.",
token, FORMAT_SDK(sdkVersion()));
"Proceeding anyway because the app is old. Memory errors "
"are likely.",
token);
}
objc_autoreleasePoolInvalid(token);
}
@ -1127,8 +1298,19 @@ public:
if (*p == POOL_BOUNDARY) {
_objc_inform("[%p] ################ POOL %p", p, p);
} else {
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
AutoreleasePoolEntry *entry = (AutoreleasePoolEntry *)p;
if (entry->count > 0) {
id obj = (id)entry->ptr;
_objc_inform("[%p] %#16lx %s autorelease count %u",
p, (unsigned long)obj, object_getClassName(obj),
entry->count + 1);
goto done;
}
#endif
_objc_inform("[%p] %#16lx %s",
p, (unsigned long)*p, object_getClassName(*p));
done:;
}
}
}
@ -1161,6 +1343,20 @@ public:
_objc_inform("##############");
}
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
__attribute__((noinline, cold))
unsigned sumOfExtraReleases()
{
unsigned sumOfExtraReleases = 0;
for (id *p = begin(); p < next; p++) {
if (*p != POOL_BOUNDARY) {
sumOfExtraReleases += ((AutoreleasePoolEntry *)p)->count;
}
}
return sumOfExtraReleases;
}
#endif
__attribute__((noinline, cold))
static void printHiwat()
{
@ -1168,16 +1364,29 @@ public:
// Ignore high water marks under 256 to suppress noise.
AutoreleasePoolPage *p = hotPage();
uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
if (mark > p->hiwat && mark > 256) {
if (mark > p->hiwat + 256) {
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
unsigned sumOfExtraReleases = 0;
#endif
for( ; p; p = p->parent) {
p->unprotect();
p->hiwat = mark;
p->protect();
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
sumOfExtraReleases += p->sumOfExtraReleases();
#endif
}
_objc_inform("POOL HIGHWATER: new high water mark of %u "
"pending releases for thread %p:",
mark, objc_thread_self());
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
if (sumOfExtraReleases > 0) {
_objc_inform("POOL HIGHWATER: extra sequential autoreleases of objects: %u",
sumOfExtraReleases);
}
#endif
void *stack[128];
int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
@ -1201,14 +1410,14 @@ public:
NEVER_INLINE id
objc_object::rootRetain_overflow(bool tryRetain)
{
return rootRetain(tryRetain, true);
return rootRetain(tryRetain, RRVariant::Full);
}
NEVER_INLINE uintptr_t
objc_object::rootRelease_underflow(bool performDealloc)
{
return rootRelease(performDealloc, true);
return rootRelease(performDealloc, RRVariant::Full);
}
@ -1317,7 +1526,7 @@ objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
uintptr_t carry;
size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
size_t refcnt = addc(oldRefcnt, (extra_rc - 1) << SIDE_TABLE_RC_SHIFT, 0, &carry);
if (carry) refcnt = SIDE_TABLE_RC_PINNED;
if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
@ -1359,7 +1568,7 @@ objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
// Move some retain counts from the side table to the isa field.
// Returns the actual count subtracted, which may be less than the request.
size_t
objc_object::SidetableBorrow
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
ASSERT(isa.nonpointer);
@ -1368,7 +1577,7 @@ objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
RefcountMap::iterator it = table.refcnts.find(this);
if (it == table.refcnts.end() || it->second == 0) {
// Side table retain count is zero. Can't borrow.
return 0;
return { 0, 0 };
}
size_t oldRefcnt = it->second;
@ -1379,7 +1588,7 @@ objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow
it->second = newRefcnt;
return delta_rc;
return { delta_rc, newRefcnt >> SIDE_TABLE_RC_SHIFT };
}
@ -1394,19 +1603,29 @@ objc_object::sidetable_getExtraRC_nolock()
}
void
objc_object::sidetable_clearExtraRC_nolock()
{
ASSERT(isa.nonpointer);
SideTable& table = SideTables()[this];
RefcountMap::iterator it = table.refcnts.find(this);
table.refcnts.erase(it);
}
// SUPPORT_NONPOINTER_ISA
#endif
id
objc_object::sidetable_retain()
objc_object::sidetable_retain(bool locked)
{
#if SUPPORT_NONPOINTER_ISA
ASSERT(!isa.nonpointer);
#endif
SideTable& table = SideTables()[this];
table.lock();
if (!locked) table.lock();
size_t& refcntStorage = table.refcnts[this];
if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
refcntStorage += SIDE_TABLE_RC_ONE;
@ -1505,6 +1724,14 @@ objc_object::sidetable_isWeaklyReferenced()
return result;
}
#if OBJC_WEAK_FORMATION_CALLOUT_DEFINED
//Clients can dlsym() for this symbol to see if an ObjC supporting
//-_setWeaklyReferenced is present
OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout = 0;
static_assert(SUPPORT_NONPOINTER_ISA, "Weak formation callout must only be defined when nonpointer isa is supported.");
#else
static_assert(!SUPPORT_NONPOINTER_ISA, "If weak callout is not present then we must not support nonpointer isas.");
#endif
void
objc_object::sidetable_setWeaklyReferenced_nolock()
@ -1523,7 +1750,7 @@ objc_object::sidetable_setWeaklyReferenced_nolock()
// return uintptr_t instead of bool so that the various raw-isa
// -release paths all return zero in eax
uintptr_t
objc_object::sidetable_release(bool performDealloc)
objc_object::sidetable_release(bool locked, bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
ASSERT(!isa.nonpointer);
@ -1532,7 +1759,7 @@ objc_object::sidetable_release(bool performDealloc)
bool do_dealloc = false;
table.lock();
if (!locked) table.lock();
auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
auto &refcnt = it.first->second;
if (it.second) {
@ -1583,8 +1810,7 @@ __attribute__((aligned(16), flatten, noinline))
id
objc_retain(id obj)
{
if (!obj) return obj;
if (obj->isTaggedPointer()) return obj;
if (obj->isTaggedPointerOrNil()) return obj;
return obj->retain();
}
@ -1593,8 +1819,7 @@ __attribute__((aligned(16), flatten, noinline))
void
objc_release(id obj)
{
if (!obj) return;
if (obj->isTaggedPointer()) return;
if (obj->isTaggedPointerOrNil()) return;
return obj->release();
}
@ -1603,8 +1828,7 @@ __attribute__((aligned(16), flatten, noinline))
id
objc_autorelease(id obj)
{
if (!obj) return obj;
if (obj->isTaggedPointer()) return obj;
if (obj->isTaggedPointerOrNil()) return obj;
return obj->autorelease();
}
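// isTaggedPointerOrNil folds the old nil and tagged-pointer checks into
// one predicate; a plausible shape (tag bit in the MSB, as on arm64):
//   static bool isTaggedPointerOrNil(const void *p) {
//       return !p || ((uintptr_t)p & _OBJC_TAG_MASK) == _OBJC_TAG_MASK;
//   }
// which lets the three fast paths above test nil and tagged in one branch.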
@ -1694,7 +1918,6 @@ _objc_rootRelease(id obj)
obj->rootRelease();
}
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
static ALWAYS_INLINE id
@ -1750,7 +1973,7 @@ objc_opt_new(Class cls)
{
#if __OBJC2__
if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
return [callAlloc(cls, false/*checkNil*/) init];
}
#endif
return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
@ -1761,7 +1984,7 @@ id
objc_opt_self(id obj)
{
#if __OBJC2__
if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) {
if (fastpath(obj->isTaggedPointerOrNil() || !obj->ISA()->hasCustomCore())) {
return obj;
}
#endif
@ -1790,7 +2013,7 @@ objc_opt_isKindOfClass(id obj, Class otherClass)
if (slowpath(!obj)) return NO;
Class cls = obj->getIsa();
if (fastpath(!cls->hasCustomCore())) {
for (Class tcls = cls; tcls; tcls = tcls->superclass) {
for (Class tcls = cls; tcls; tcls = tcls->getSuperclass()) {
if (tcls == otherClass) return YES;
}
return NO;
@ -1978,16 +2201,6 @@ void arr_init(void)
_objc_associations_init();
}
#ifdef DARLING
// see libdispatch
#if __has_attribute(objc_nonlazy_class)
#define NONLAZY_CLASS __attribute__((objc_nonlazy_class))
#define NONLAZY_CLASS_LOAD
#else
#define NONLAZY_CLASS
#define NONLAZY_CLASS_LOAD + (void)load {}
#endif
#endif
#if SUPPORT_TAGGED_POINTERS
@ -1997,15 +2210,8 @@ void arr_init(void)
@interface __NSUnrecognizedTaggedPointer : NSObject
@end
#ifdef DARLING
NONLAZY_CLASS
#else
__attribute__((objc_nonlazy_class))
#endif
@implementation __NSUnrecognizedTaggedPointer
#ifdef DARLING
NONLAZY_CLASS_LOAD
#endif
-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }
@ -2013,15 +2219,8 @@ NONLAZY_CLASS_LOAD
#endif
#ifdef DARLING
NONLAZY_CLASS
#else
__attribute__((objc_nonlazy_class))
#endif
@implementation NSObject
#ifdef DARLING
NONLAZY_CLASS_LOAD
#endif
+ (void)initialize {
}
@ -2043,11 +2242,11 @@ NONLAZY_CLASS_LOAD
}
+ (Class)superclass {
return self->superclass;
return self->getSuperclass();
}
- (Class)superclass {
return [self class]->superclass;
return [self class]->getSuperclass();
}
+ (BOOL)isMemberOfClass:(Class)cls {
@ -2059,28 +2258,28 @@ NONLAZY_CLASS_LOAD
}
+ (BOOL)isKindOfClass:(Class)cls {
for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) {
for (Class tcls = self->ISA(); tcls; tcls = tcls->getSuperclass()) {
if (tcls == cls) return YES;
}
return NO;
}
- (BOOL)isKindOfClass:(Class)cls {
for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
if (tcls == cls) return YES;
}
return NO;
}
+ (BOOL)isSubclassOfClass:(Class)cls {
for (Class tcls = self; tcls; tcls = tcls->superclass) {
for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
if (tcls == cls) return YES;
}
return NO;
}
+ (BOOL)isAncestorOfObject:(NSObject *)obj {
for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
for (Class tcls = [obj class]; tcls; tcls = tcls->getSuperclass()) {
if (tcls == self) return YES;
}
return NO;
@ -2100,7 +2299,7 @@ NONLAZY_CLASS_LOAD
+ (BOOL)conformsToProtocol:(Protocol *)protocol {
if (!protocol) return NO;
for (Class tcls = self; tcls; tcls = tcls->superclass) {
for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
if (class_conformsToProtocol(tcls, protocol)) return YES;
}
return NO;
@ -2108,7 +2307,7 @@ NONLAZY_CLASS_LOAD
- (BOOL)conformsToProtocol:(Protocol *)protocol {
if (!protocol) return NO;
for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
if (class_conformsToProtocol(tcls, protocol)) return YES;
}
return NO;
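// All the superclass -> getSuperclass() substitutions in this file have
// the same motive: where the superclass field is signed with pointer
// authentication, the accessor authenticates or strips the stored value
// before use, roughly
//   Class getSuperclass() const { return ptrauth_strip(superclass, key); }
// (a sketch; the real accessor varies with the signing mode), while on
// other targets it compiles to the plain load it replaces.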
View File
@ -59,12 +59,12 @@ struct PointerUnionTypeSelectorReturn<
typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return;
};
template <class PT1, class PT2>
template <class T1, class T2, typename Auth1, typename Auth2>
class PointerUnion {
uintptr_t _value;
static_assert(alignof(PT1) >= 2, "alignment requirement");
static_assert(alignof(PT2) >= 2, "alignment requirement");
static_assert(alignof(T1) >= 2, "alignment requirement");
static_assert(alignof(T2) >= 2, "alignment requirement");
struct IsPT1 {
static const uintptr_t Num = 0;
@ -85,8 +85,12 @@ public:
explicit PointerUnion(const std::atomic<uintptr_t> &raw)
: _value(raw.load(std::memory_order_relaxed))
{ }
PointerUnion(PT1 t) : _value((uintptr_t)t) { }
PointerUnion(PT2 t) : _value((uintptr_t)t | 1) { }
PointerUnion(T1 *t, const void *address) {
_value = (uintptr_t)Auth1::sign(t, address);
}
PointerUnion(T2 *t, const void *address) {
_value = (uintptr_t)Auth2::sign(t, address) | 1;
}
void storeAt(std::atomic<uintptr_t> &raw, std::memory_order order) const {
raw.store(_value, order);
@ -94,20 +98,24 @@ public:
template <typename T>
bool is() const {
using Ty = typename PointerUnionTypeSelector<PT1, T, IsPT1,
PointerUnionTypeSelector<PT2, T, IsPT2,
using Ty = typename PointerUnionTypeSelector<T1 *, T, IsPT1,
PointerUnionTypeSelector<T2 *, T, IsPT2,
UNION_DOESNT_CONTAIN_TYPE<T>>>::Return;
return getTag() == Ty::Num;
}
template <typename T> T get() const {
template <typename T> T get(const void *address) const {
ASSERT(is<T>() && "Invalid accessor called");
return reinterpret_cast<T>(getPointer());
using AuthT = typename PointerUnionTypeSelector<T1 *, T, Auth1,
PointerUnionTypeSelector<T2 *, T, Auth2,
UNION_DOESNT_CONTAIN_TYPE<T>>>::Return;
return AuthT::auth((T)getPointer(), address);
}
template <typename T> T dyn_cast() const {
template <typename T> T dyn_cast(const void *address) const {
if (is<T>())
return get<T>();
return get<T>(address);
return T();
}
};
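// Usage sketch for the authenticated union above; Foo/Bar/FooAuth/BarAuth
// are illustrative stand-ins, and each Auth type supplies static
// sign(ptr, address) / auth(ptr, address) hooks:
//   PointerUnion<Foo, Bar, FooAuth, BarAuth> u(fooPtr, &slot); // signed vs &slot
//   if (u.is<Foo *>())
//       Foo *f = u.get<Foo *>(&slot);                          // auth'd on read
// Passing the same storage address to the constructor and to get() is
// what ties the signature to the slot.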

View File

@ -45,47 +45,21 @@
// by CF, so __IncompleteProtocol would be left without an R/R implementation
// otherwise, which would break ARC.
#ifdef DARLING
// see libdispatch
#if __has_attribute(objc_nonlazy_class)
#define NONLAZY_CLASS __attribute__((objc_nonlazy_class))
#define NONLAZY_CLASS_LOAD
#else
#define NONLAZY_CLASS
#define NONLAZY_CLASS_LOAD + (void)load {}
#endif
#endif
@interface __IncompleteProtocol : NSObject
@end
#if __OBJC2__
#ifdef DARLING
NONLAZY_CLASS
#else
__attribute__((objc_nonlazy_class))
#endif
#endif
@implementation __IncompleteProtocol
#ifdef DARLING
NONLAZY_CLASS_LOAD
#endif
@end
#if __OBJC2__
#ifdef DARLING
NONLAZY_CLASS
#else
__attribute__((objc_nonlazy_class))
#endif
#endif
@implementation Protocol
#ifdef DARLING
NONLAZY_CLASS_LOAD
#endif
- (BOOL) conformsTo: (Protocol *)aProtocolObj
{
return protocol_conformsToProtocol(self, aProtocolObj);
@ -126,7 +100,7 @@ NONLAZY_CLASS_LOAD
// check isKindOf:
Class cls;
Class protoClass = objc_getClass("Protocol");
for (cls = object_getClass(other); cls; cls = cls->superclass) {
for (cls = object_getClass(other); cls; cls = cls->getSuperclass()) {
if (cls == protoClass) break;
}
if (!cls) return NO;
View File
@ -28,6 +28,8 @@
#if __arm64__
#include "objc-config.h"
#if __LP64__
// true arm64
@ -134,6 +136,30 @@
paciza $0 // resign cached imp as IMP
.endmacro
.macro ExtractISA
and $0, $1, #ISA_MASK
#if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_STRIP
xpacd $0
#elif ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
mov x10, $2
movk x10, #ISA_SIGNING_DISCRIMINATOR, LSL #48
autda $0, x10
#endif
.endmacro
.macro AuthISASuper dst, addr_mutable, discriminator
#if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
movk \addr_mutable, #\discriminator, LSL #48
autda \dst, \addr_mutable
#elif ISA_SIGNING_AUTH_MODE == ISA_SIGNING_STRIP
xpacd \dst
#endif
.endmacro
.macro SignAsImp
paciza $0
.endmacro
// JOP
#else
// not JOP
@ -164,6 +190,13 @@
eor $0, $0, $3
.endmacro
.macro SignAsImp
.endmacro
.macro ExtractISA
and $0, $1, #ISA_MASK
.endmacro
// not JOP
#endif
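// Both variants of ExtractISA implement, in C terms,
//   cls = (Class)(isa & ISA_MASK);
// with the JOP build additionally authenticating (autda) or stripping
// (xpacd) the pointer-authentication bits folded into the stored isa.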
View File
@ -0,0 +1,356 @@
// This file contains stubs matching the symbols previously exported by libobjc
// when i386 Mac was actually supported. These stubs allow us to tease apart the
// dependencies to prepare for removing i386 Mac libobjc entirely.
//
// This file is not built when building for any other arch/OS combination. When
// building for i386 Mac, no other source files are built, just this one. This
// is handled using the Included/Excluded Source File Names settings in Xcode,
// with arch/OS-specific overrides.
//
// rdar://problem/58541885
#pragma GCC visibility push(default)
const char ___ld_hide_os10_5__objc_class_name_NSObject __asm__("$ld$hide$os10.5$.objc_class_name_NSObject");
const char ___ld_hide_os10_6__objc_class_name_NSObject __asm__("$ld$hide$os10.6$.objc_class_name_NSObject");
const char ___ld_hide_os10_7__objc_class_name_NSObject __asm__("$ld$hide$os10.7$.objc_class_name_NSObject");
const char ___objc_class_name_List __asm__(".objc_class_name_List");
const char ___objc_class_name_NSObject __asm__(".objc_class_name_NSObject");
const char ___objc_class_name_Object __asm__(".objc_class_name_Object");
const char ___objc_class_name_Protocol __asm__(".objc_class_name_Protocol");
void NXCompareHashTables(void) {}
void NXCompareMapTables(void) {}
void NXCopyHashTable(void) {}
void NXCopyStringBuffer(void) {}
void NXCopyStringBufferFromZone(void) {}
void NXCountHashTable(void) {}
void NXCountMapTable(void) {}
void NXCreateHashTable(void) {}
void NXCreateHashTableFromZone(void) {}
void NXCreateMapTable(void) {}
void NXCreateMapTableFromZone(void) {}
void NXEmptyHashTable(void) {}
void NXFreeHashTable(void) {}
void NXFreeMapTable(void) {}
void NXHashGet(void) {}
void NXHashInsert(void) {}
void NXHashInsertIfAbsent(void) {}
void NXHashMember(void) {}
void NXHashRemove(void) {}
void NXInitHashState(void) {}
void NXInitMapState(void) {}
void NXMapGet(void) {}
void NXMapInsert(void) {}
void NXMapMember(void) {}
void NXMapRemove(void) {}
void NXNextHashState(void) {}
void NXNextMapState(void) {}
void NXNoEffectFree(void) {}
const char NXObjectMapPrototype;
void NXPtrHash(void) {}
void NXPtrIsEqual(void) {}
const char NXPtrPrototype;
const char NXPtrStructKeyPrototype;
const char NXPtrValueMapPrototype;
void NXReallyFree(void) {}
void NXResetHashTable(void) {}
void NXResetMapTable(void) {}
void NXStrHash(void) {}
void NXStrIsEqual(void) {}
const char NXStrPrototype;
const char NXStrStructKeyPrototype;
const char NXStrValueMapPrototype;
void NXUniqueString(void) {}
void NXUniqueStringNoCopy(void) {}
void NXUniqueStringWithLength(void) {}
char _alloc;
void _class_getIvarMemoryManagement(void) {}
void _class_isFutureClass(void) {}
void _class_isSwift(void) {}
char _copy;
char _dealloc;
char _error;
void _objcInit(void) {}
void _objc_addWillInitializeClassFunc(void) {}
void _objc_atfork_child(void) {}
void _objc_atfork_parent(void) {}
void _objc_atfork_prepare(void) {}
void _objc_autoreleasePoolPop(void) {}
void _objc_autoreleasePoolPrint(void) {}
void _objc_autoreleasePoolPush(void) {}
void _objc_deallocOnMainThreadHelper(void) {}
const char _objc_debug_class_hash;
const char _objc_empty_cache;
void _objc_error(void) {}
void _objc_flush_caches(void) {}
void _objc_getFreedObjectClass(void) {}
void _objc_init(void) {}
void _objc_msgForward(void) {}
void _objc_msgForward_stret(void) {}
void _objc_resolve_categories_for_class(void) {}
void _objc_rootAlloc(void) {}
void _objc_rootAllocWithZone(void) {}
void _objc_rootAutorelease(void) {}
void _objc_rootDealloc(void) {}
void _objc_rootFinalize(void) {}
void _objc_rootHash(void) {}
void _objc_rootInit(void) {}
void _objc_rootIsDeallocating(void) {}
void _objc_rootRelease(void) {}
void _objc_rootReleaseWasZero(void) {}
void _objc_rootRetain(void) {}
void _objc_rootRetainCount(void) {}
void _objc_rootTryRetain(void) {}
void _objc_rootZone(void) {}
void _objc_setBadAllocHandler(void) {}
void _objc_setClassLoader(void) {}
void _protocol_getMethodTypeEncoding(void) {}
char _realloc;
char _zoneAlloc;
char _zoneCopy;
char _zoneRealloc;
void class_addIvar(void) {}
void class_addMethod(void) {}
void class_addMethods(void) {}
void class_addProperty(void) {}
void class_addProtocol(void) {}
void class_conformsToProtocol(void) {}
void class_copyIvarList(void) {}
void class_copyMethodList(void) {}
void class_copyPropertyList(void) {}
void class_copyProtocolList(void) {}
void class_createInstance(void) {}
void class_createInstanceFromZone(void) {}
void class_createInstances(void) {}
void class_getClassMethod(void) {}
void class_getClassVariable(void) {}
void class_getImageName(void) {}
void class_getInstanceMethod(void) {}
void class_getInstanceSize(void) {}
void class_getInstanceVariable(void) {}
void class_getIvarLayout(void) {}
void class_getMethodImplementation(void) {}
void class_getMethodImplementation_stret(void) {}
void class_getName(void) {}
void class_getProperty(void) {}
void class_getSuperclass(void) {}
void class_getVersion(void) {}
void class_getWeakIvarLayout(void) {}
void class_isMetaClass(void) {}
void class_lookupMethod(void) {}
void class_nextMethodList(void) {}
void class_poseAs(void) {}
void class_removeMethods(void) {}
void class_replaceMethod(void) {}
void class_replaceProperty(void) {}
void class_respondsToMethod(void) {}
void class_respondsToSelector(void) {}
void class_setIvarLayout(void) {}
void class_setSuperclass(void) {}
void class_setVersion(void) {}
void class_setWeakIvarLayout(void) {}
void gdb_class_getClass(void) {}
void gdb_object_getClass(void) {}
void imp_getBlock(void) {}
void imp_implementationWithBlock(void) {}
void imp_removeBlock(void) {}
void instrumentObjcMessageSends(void) {}
void ivar_getName(void) {}
void ivar_getOffset(void) {}
void ivar_getTypeEncoding(void) {}
void method_copyArgumentType(void) {}
void method_copyReturnType(void) {}
void method_exchangeImplementations(void) {}
void method_getArgumentType(void) {}
void method_getDescription(void) {}
void method_getImplementation(void) {}
void method_getName(void) {}
void method_getNumberOfArguments(void) {}
void method_getReturnType(void) {}
void method_getSizeOfArguments(void) {}
void method_getTypeEncoding(void) {}
void method_invoke(void) {}
void method_invoke_stret(void) {}
void method_setImplementation(void) {}
void objc_addClass(void) {}
void objc_addLoadImageFunc(void) {}
void objc_alloc(void) {}
void objc_allocWithZone(void) {}
void objc_alloc_init(void) {}
void objc_allocateClassPair(void) {}
void objc_allocateProtocol(void) {}
void objc_allocate_object(void) {}
void objc_appRequiresGC(void) {}
void objc_assertRegisteredThreadWithCollector(void) {}
void objc_assign_global(void) {}
void objc_assign_ivar(void) {}
void objc_assign_strongCast(void) {}
void objc_assign_threadlocal(void) {}
void objc_assign_weak(void) {}
void objc_atomicCompareAndSwapGlobal(void) {}
void objc_atomicCompareAndSwapGlobalBarrier(void) {}
void objc_atomicCompareAndSwapInstanceVariable(void) {}
void objc_atomicCompareAndSwapInstanceVariableBarrier(void) {}
void objc_atomicCompareAndSwapPtr(void) {}
void objc_atomicCompareAndSwapPtrBarrier(void) {}
void objc_autorelease(void) {}
void objc_autoreleasePoolPop(void) {}
void objc_autoreleasePoolPush(void) {}
void objc_autoreleaseReturnValue(void) {}
void objc_clear_deallocating(void) {}
void objc_clear_stack(void) {}
void objc_collect(void) {}
void objc_collect_init(void) {}
void objc_collectableZone(void) {}
void objc_collectingEnabled(void) {}
void objc_collecting_enabled(void) {}
void objc_constructInstance(void) {}
void objc_copyClassList(void) {}
void objc_copyClassNamesForImage(void) {}
void objc_copyClassNamesForImageHeader(void) {}
void objc_copyCppObjectAtomic(void) {}
void objc_copyImageNames(void) {}
void objc_copyProtocolList(void) {}
void objc_copyStruct(void) {}
void objc_copyWeak(void) {}
const char objc_debug_autoreleasepoolpage_child_offset;
const char objc_debug_autoreleasepoolpage_depth_offset;
const char objc_debug_autoreleasepoolpage_hiwat_offset;
const char objc_debug_autoreleasepoolpage_magic_offset;
const char objc_debug_autoreleasepoolpage_next_offset;
const char objc_debug_autoreleasepoolpage_parent_offset;
const char objc_debug_autoreleasepoolpage_thread_offset;
void objc_destroyWeak(void) {}
void objc_destructInstance(void) {}
void objc_disposeClassPair(void) {}
void objc_dumpHeap(void) {}
void objc_duplicateClass(void) {}
void objc_enumerationMutation(void) {}
void objc_exception_extract(void) {}
void objc_exception_get_functions(void) {}
void objc_exception_match(void) {}
void objc_exception_set_functions(void) {}
void objc_exception_throw(void) {}
void objc_exception_try_enter(void) {}
void objc_exception_try_exit(void) {}
void objc_finalizeOnMainThread(void) {}
void objc_getAssociatedObject(void) {}
void objc_getClass(void) {}
void objc_getClassList(void) {}
void objc_getClasses(void) {}
void objc_getFutureClass(void) {}
void objc_getMetaClass(void) {}
void objc_getOrigClass(void) {}
void objc_getProperty(void) {}
void objc_getProtocol(void) {}
void objc_getRequiredClass(void) {}
void objc_initWeak(void) {}
void objc_initWeakOrNil(void) {}
void objc_initializeClassPair(void) {}
void objc_isAuto(void) {}
void objc_is_finalized(void) {}
void objc_loadModule(void) {}
void objc_loadModules(void) {}
void objc_loadWeak(void) {}
void objc_loadWeakRetained(void) {}
void objc_lookUpClass(void) {}
void objc_memmove_collectable(void) {}
void objc_moveWeak(void) {}
void objc_msgSend(void) {}
void objc_msgSendSuper(void) {}
void objc_msgSendSuper_stret(void) {}
void objc_msgSend_fpret(void) {}
void objc_msgSend_stret(void) {}
void objc_msgSendv(void) {}
void objc_msgSendv_fpret(void) {}
void objc_msgSendv_stret(void) {}
void objc_opt_class(void) {}
void objc_opt_isKindOfClass(void) {}
void objc_opt_new(void) {}
void objc_opt_respondsToSelector(void) {}
void objc_opt_self(void) {}
void objc_read_weak(void) {}
void objc_registerClassPair(void) {}
void objc_registerProtocol(void) {}
void objc_registerThreadWithCollector(void) {}
void objc_release(void) {}
void objc_removeAssociatedObjects(void) {}
void objc_retain(void) {}
void objc_retainAutorelease(void) {}
void objc_retainAutoreleaseReturnValue(void) {}
void objc_retainAutoreleasedReturnValue(void) {}
void objc_retainBlock(void) {}
void objc_retain_autorelease(void) {}
void objc_retainedObject(void) {}
void objc_setAssociatedObject(void) {}
void objc_setClassHandler(void) {}
void objc_setCollectionRatio(void) {}
void objc_setCollectionThreshold(void) {}
void objc_setEnumerationMutationHandler(void) {}
void objc_setForwardHandler(void) {}
void objc_setHook_getImageName(void) {}
void objc_setMultithreaded(void) {}
void objc_setProperty(void) {}
void objc_setProperty_atomic(void) {}
void objc_setProperty_atomic_copy(void) {}
void objc_setProperty_nonatomic(void) {}
void objc_setProperty_nonatomic_copy(void) {}
void objc_set_collection_ratio(void) {}
void objc_set_collection_threshold(void) {}
void objc_should_deallocate(void) {}
void objc_startCollectorThread(void) {}
void objc_start_collector_thread(void) {}
void objc_storeStrong(void) {}
void objc_storeWeak(void) {}
void objc_storeWeakOrNil(void) {}
void objc_sync_enter(void) {}
void objc_sync_exit(void) {}
void objc_sync_try_enter(void) {}
void objc_unloadModules(void) {}
void objc_unregisterThreadWithCollector(void) {}
void objc_unretainedObject(void) {}
void objc_unretainedPointer(void) {}
void objc_unsafeClaimAutoreleasedReturnValue(void) {}
void object_copy(void) {}
void object_copyFromZone(void) {}
void object_dispose(void) {}
void object_getClass(void) {}
void object_getClassName(void) {}
void object_getIndexedIvars(void) {}
void object_getInstanceVariable(void) {}
void object_getIvar(void) {}
void object_getMethodImplementation(void) {}
void object_getMethodImplementation_stret(void) {}
void object_isClass(void) {}
void object_realloc(void) {}
void object_reallocFromZone(void) {}
void object_setClass(void) {}
void object_setInstanceVariable(void) {}
void object_setInstanceVariableWithStrongDefault(void) {}
void object_setIvar(void) {}
void object_setIvarWithStrongDefault(void) {}
void property_copyAttributeList(void) {}
void property_copyAttributeValue(void) {}
void property_getAttributes(void) {}
void property_getName(void) {}
void protocol_addMethodDescription(void) {}
void protocol_addProperty(void) {}
void protocol_addProtocol(void) {}
void protocol_conformsToProtocol(void) {}
void protocol_copyMethodDescriptionList(void) {}
void protocol_copyPropertyList(void) {}
void protocol_copyPropertyList2(void) {}
void protocol_copyProtocolList(void) {}
void protocol_getMethodDescription(void) {}
void protocol_getName(void) {}
void protocol_getProperty(void) {}
void protocol_isEqual(void) {}
void sel_getName(void) {}
void sel_getUid(void) {}
void sel_isEqual(void) {}
void sel_isMapped(void) {}
void sel_registerName(void) {}
void objc_cache_buckets(void) {}
void objc_cache_bytesForCapacity(void) {}
void objc_cache_capacity(void) {}
void objc_cache_occupied(void) {}
void objc_copyClassesForImage(void) {}
View File
@ -55,9 +55,27 @@
// uintptr_t extraBytes : 1; // allocated with extra bytes
# if __arm64__
// ARM64 simulators have a larger address space, so use the ARM64e
// scheme even when simulators build for ARM64-not-e.
# if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR
# define ISA_MASK 0x007ffffffffffff8ULL
# define ISA_MAGIC_MASK 0x0000000000000001ULL
# define ISA_MAGIC_VALUE 0x0000000000000001ULL
# define ISA_HAS_CXX_DTOR_BIT 0
# define ISA_BITFIELD \
uintptr_t nonpointer : 1; \
uintptr_t has_assoc : 1; \
uintptr_t weakly_referenced : 1; \
uintptr_t shiftcls_and_sig : 52; \
uintptr_t has_sidetable_rc : 1; \
uintptr_t extra_rc : 8
# define RC_ONE (1ULL<<56)
# define RC_HALF (1ULL<<7)
# else
# define ISA_MASK 0x0000000ffffffff8ULL
# define ISA_MAGIC_MASK 0x000003f000000001ULL
# define ISA_MAGIC_VALUE 0x000001a000000001ULL
# define ISA_HAS_CXX_DTOR_BIT 1
# define ISA_BITFIELD \
uintptr_t nonpointer : 1; \
uintptr_t has_assoc : 1; \
@ -65,16 +83,18 @@
uintptr_t shiftcls : 33; /*MACH_VM_MAX_ADDRESS 0x1000000000*/ \
uintptr_t magic : 6; \
uintptr_t weakly_referenced : 1; \
uintptr_t deallocating : 1; \
uintptr_t unused : 1; \
uintptr_t has_sidetable_rc : 1; \
uintptr_t extra_rc : 19
# define RC_ONE (1ULL<<45)
# define RC_HALF (1ULL<<18)
# endif
# elif __x86_64__
# define ISA_MASK 0x00007ffffffffff8ULL
# define ISA_MAGIC_MASK 0x001f800000000001ULL
# define ISA_MAGIC_VALUE 0x001d800000000001ULL
# define ISA_HAS_CXX_DTOR_BIT 1
# define ISA_BITFIELD \
uintptr_t nonpointer : 1; \
uintptr_t has_assoc : 1; \
@ -82,7 +102,7 @@
uintptr_t shiftcls : 44; /*MACH_VM_MAX_ADDRESS 0x7fffffe00000*/ \
uintptr_t magic : 6; \
uintptr_t weakly_referenced : 1; \
uintptr_t deallocating : 1; \
uintptr_t unused : 1; \
uintptr_t has_sidetable_rc : 1; \
uintptr_t extra_rc : 8
# define RC_ONE (1ULL<<56)
@ -109,6 +129,7 @@
# define ISA_INDEX_COUNT (1 << ISA_INDEX_BITS)
# define ISA_INDEX_MAGIC_MASK 0x001E0001
# define ISA_INDEX_MAGIC_VALUE 0x001C0001
# define ISA_HAS_CXX_DTOR_BIT 1
# define ISA_BITFIELD \
uintptr_t nonpointer : 1; \
uintptr_t has_assoc : 1; \
@ -116,7 +137,7 @@
uintptr_t magic : 4; \
uintptr_t has_cxx_dtor : 1; \
uintptr_t weakly_referenced : 1; \
uintptr_t deallocating : 1; \
uintptr_t unused : 1; \
uintptr_t has_sidetable_rc : 1; \
uintptr_t extra_rc : 7
# define RC_ONE (1ULL<<25)
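// Worked example for these layouts: a retain on a nonpointer-isa object
// adds RC_ONE to the raw isa, i.e. increments extra_rc; when extra_rc
// overflows its field (7 bits here, 8 or 19 in the 64-bit layouts above),
// roughly half the count (RC_HALF) is migrated to the side table and
// has_sidetable_rc is set.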
View File
@ -46,7 +46,7 @@
/* Linker metadata symbols */
// NSObject was in Foundation/CF on macOS < 10.8.
#if TARGET_OS_OSX
#if TARGET_OS_OSX && (__x86_64__ || __i386__)
#if __OBJC2__
OBJC_EXPORT const char __objc_nsobject_class_10_5
@ -171,6 +171,15 @@ HasClassProperties:
Old ABI: Set by some compilers. Not used by the runtime.
*/
// Description of an expected duplicate class name.
// __DATA,__objc_dupclass stores one of these. Only the main image is
// consulted for these purposes.
typedef struct _objc_duplicate_class {
uint32_t version;
uint32_t flags;
const char name[64];
} objc_duplicate_class;
#define OBJC_HAS_DUPLICATE_CLASS 1
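// Illustrative use of the record above (names and values hypothetical):
// an image that knowingly ships a second class named "MyDupClass" could
// mark the duplicate as expected with
//   static const objc_duplicate_class OBJC_DUPCLASS_MyDupClass
//       __attribute__((used, section("__DATA,__objc_dupclass")))
//       = { 1 /*version*/, 0 /*flags*/, "MyDupClass" };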
/* Properties */
@ -412,7 +421,7 @@ objc_retainBlock(id _Nullable)
// Extract class pointer from an isa field.
#if TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC
#if TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__
// No simulators use nonpointer isa yet.
#elif __LP64__
View File
@ -118,6 +118,12 @@
# define NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER 1
#endif
/* The arm64 ABI requires proper casting to ensure arguments are passed
 * correctly. */
#if defined(__arm64__) && !__swift__
# undef OBJC_OLD_DISPATCH_PROTOTYPES
# define OBJC_OLD_DISPATCH_PROTOTYPES 0
#endif
/* OBJC_OLD_DISPATCH_PROTOTYPES == 0 enforces the rule that the dispatch
* functions must be cast to an appropriate function pointer type. */
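/* For example, a strict-prototype call site casts the entry point first:
 *   typedef id (*InitFn)(id, SEL);
 *   id obj = ((InitFn)objc_msgSend)(allocated, @selector(init));
 */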
View File
@ -57,6 +57,16 @@
# define TrampolinePtrauth
#endif
// A page of trampolines is as big as the maximum supported page size
// everywhere except i386. i386 only exists for the watch simulator
// now, and we know it really only has 4kB pages. Also see comments
// below about PAGE_SIZE and PAGE_MAX_SIZE.
#ifdef __i386__
#define TRAMPOLINE_PAGE_SIZE PAGE_MIN_SIZE
#else
#define TRAMPOLINE_PAGE_SIZE PAGE_MAX_SIZE
#endif
class TrampolinePointerWrapper {
struct TrampolinePointers {
class TrampolineAddress {
@ -103,22 +113,22 @@ class TrampolinePointerWrapper {
void check() {
#if DEBUG
ASSERT(impl.address() == textSegment + PAGE_MAX_SIZE);
ASSERT(impl.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
assert(impl.address() + PAGE_MAX_SIZE ==
ASSERT(impl.address() == textSegment + TRAMPOLINE_PAGE_SIZE);
ASSERT(impl.address() % PAGE_SIZE == 0); // not TRAMPOLINE_PAGE_SIZE
ASSERT(impl.address() + TRAMPOLINE_PAGE_SIZE ==
last.address() + SLOT_SIZE);
ASSERT(last.address()+8 < textSegment + textSegmentSize);
ASSERT((last.address() - start.address()) % SLOT_SIZE == 0);
# if SUPPORT_STRET
ASSERT(impl_stret.address() == textSegment + 2*PAGE_MAX_SIZE);
ASSERT(impl_stret.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE
assert(impl_stret.address() + PAGE_MAX_SIZE ==
ASSERT(impl_stret.address() == textSegment + 2*TRAMPOLINE_PAGE_SIZE);
ASSERT(impl_stret.address() % PAGE_SIZE == 0); // not TRAMPOLINE_PAGE_SIZE
ASSERT(impl_stret.address() + TRAMPOLINE_PAGE_SIZE ==
last_stret.address() + SLOT_SIZE);
assert(start.address() - impl.address() ==
ASSERT(start.address() - impl.address() ==
start_stret.address() - impl_stret.address());
assert(last_stret.address() + SLOT_SIZE <
ASSERT(last_stret.address() + SLOT_SIZE <
textSegment + textSegmentSize);
assert((last_stret.address() - start_stret.address())
ASSERT((last_stret.address() - start_stret.address())
% SLOT_SIZE == 0);
# endif
#endif
@ -178,8 +188,7 @@ public:
uintptr_t textSegment() { return get()->textSegment; }
uintptr_t textSegmentSize() { return get()->textSegmentSize; }
// See comments below about PAGE_SIZE and PAGE_MAX_SIZE.
uintptr_t dataSize() { return PAGE_MAX_SIZE; }
uintptr_t dataSize() { return TRAMPOLINE_PAGE_SIZE; }
uintptr_t impl() { return get()->impl.address(); }
uintptr_t start() { return get()->start.address(); }
@ -202,11 +211,13 @@ typedef enum {
// We must take care with our data layout on architectures that support
// multiple page sizes.
//
// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE.
// On some platforms this requires additional linker flags.
// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE,
// except on i386 which is a weird special case that uses PAGE_MIN_SIZE.
// The TRAMPOLINE_PAGE_SIZE macro handles this difference. On some platforms,
// aligning to PAGE_MAX_SIZE requires additional linker flags.
//
// When we allocate a page group, we use PAGE_MAX_SIZE size.
// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE.
// When we allocate a page group, we use TRAMPOLINE_PAGE_SIZE size.
// This allows trampoline code to find its data by subtracting TRAMPOLINE_PAGE_SIZE.
//
// When we allocate a page group, we use the process's page alignment.
// This simplifies allocation because we don't need to force greater than
@ -231,14 +242,14 @@ struct TrampolineBlockPageGroup
// Payload data: block pointers and free list.
// Bytes parallel with trampoline header code are the fields above or unused
// uint8_t payloads[PAGE_MAX_SIZE - sizeof(TrampolineBlockPageGroup)]
// uint8_t payloads[TRAMPOLINE_PAGE_SIZE - sizeof(TrampolineBlockPageGroup)]
// Code: Mach-O header, then trampoline header followed by trampolines.
// On platforms with struct return we have non-stret trampolines and
// stret trampolines. The stret and non-stret trampolines at a given
// index share the same data page.
// uint8_t macho[PAGE_MAX_SIZE];
// uint8_t trampolines[ArgumentModeCount][PAGE_MAX_SIZE];
// uint8_t macho[TRAMPOLINE_PAGE_SIZE];
// uint8_t trampolines[ArgumentModeCount][TRAMPOLINE_PAGE_SIZE];
// Per-trampoline block data format:
// initial value is 0 while page data is filled sequentially
@ -280,7 +291,7 @@ struct TrampolineBlockPageGroup
// Skip over the data area, one page of Mach-O headers,
// and one text page for each mode before this one.
return (uintptr_t)this + Trampolines.dataSize() +
PAGE_MAX_SIZE * (1 + aMode);
TRAMPOLINE_PAGE_SIZE * (1 + aMode);
}
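// Worked layout sketch (assuming TRAMPOLINE_PAGE_SIZE == 16384):
//   group + 0      data page                 (dataSize())
//   group + 16384  Mach-O header page
//   group + 32768  mode-0 trampoline text    textPage(0)
//   group + 49152  mode-1 (stret) text       textPage(1)
// so a mode-0 trampoline finds its data slot at trampoline - 2 pages and
// a stret trampoline at trampoline - 3 pages, matching the -2/-3 page
// addressing in the trampoline asm.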
IMP trampoline(int aMode, uintptr_t index) {

8
runtime/objc-blocktramps-i386.S Normal file → Executable file
View File

@ -30,13 +30,13 @@
.globl __objc_blockTrampolineStart
.globl __objc_blockTrampolineLast
.align PAGE_SHIFT
.align 12 /* PAGE_SHIFT */
__objc_blockTrampolineImpl:
movl (%esp), %eax // return address pushed by trampoline
// 4(%esp) is return address pushed by the call site
movl 8(%esp), %ecx // self -> ecx
movl %ecx, 12(%esp) // ecx -> _cmd
movl -2*PAGE_SIZE-5(%eax), %ecx // block object pointer -> ecx
movl -2*4096/*PAGE_SIZE*/-5(%eax), %ecx // block object pointer -> ecx
// trampoline is -5 bytes from the return address
// data is -2 pages from the trampoline
movl %ecx, 8(%esp) // ecx -> self
@ -567,14 +567,14 @@ __objc_blockTrampolineLast:
.globl __objc_blockTrampolineStart_stret
.globl __objc_blockTrampolineLast_stret
.align PAGE_SHIFT
.align 12 /* PAGE_SHIFT */
__objc_blockTrampolineImpl_stret:
movl (%esp), %eax // return address pushed by trampoline
// 4(%esp) is return address pushed by the call site
// 8(%esp) is struct-return address
movl 12(%esp), %ecx // self -> ecx
movl %ecx, 16(%esp) // ecx -> _cmd
movl -3*PAGE_SIZE-5(%eax), %ecx // block object pointer -> ecx
movl -3*4096/*PAGE_SIZE*/-5(%eax), %ecx // block object pointer -> ecx
// trampoline is -5 bytes from the return address
// data is -3 pages from the trampoline
movl %ecx, 12(%esp) // ecx -> self
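// Worked addressing example (a sketch; addresses are made up):
//   ret_addr   = 0x00203005         (pushed by the 5-byte call in the entry)
//   trampoline = ret_addr - 5       = 0x00203000
//   data slot  = trampoline - 3*4096 = 0x00200000 for the stret entries,
//                trampoline - 2*4096 for the non-stret entries above,
//   i.e. the same offset within the page group's data page.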

74
runtime/objc-blocktramps-x86_64.S Normal file → Executable file
View File

@ -30,22 +30,37 @@
.globl __objc_blockTrampolineStart
.globl __objc_blockTrampolineLast
.align PAGE_SHIFT
.align PAGE_MAX_SHIFT
__objc_blockTrampolineImpl:
movq (%rsp), %r10 // read return address pushed by TrampolineEntry's callq
movq %rdi, %rsi // arg1 -> arg2
movq -2*PAGE_SIZE-5(%r10), %rdi // block object pointer -> arg1
movq -2*PAGE_MAX_SIZE-5(%r10), %rdi // block object pointer -> arg1
// trampoline is -5 bytes from the return address
// data is -2 pages from the trampoline
ret // back to TrampolineEntry to preserve CPU's return stack
.macro TrampolineEntry
.macro TrampolineEntry1
// This trampoline is 8 bytes long.
// This callq is 5 bytes long.
callq __objc_blockTrampolineImpl
jmp *16(%rdi)
.endmacro
.macro TrampolineEntry4
TrampolineEntry1
TrampolineEntry1
TrampolineEntry1
TrampolineEntry1
.endmacro
#if PAGE_MAX_SHIFT == 12
#define TrampolineEntry TrampolineEntry1
#elif PAGE_MAX_SHIFT == 14
#define TrampolineEntry TrampolineEntry4
#else
#error "unknown PAGE_MAX_SHIFT value"
#endif
.align 5
__objc_blockTrampolineStart:
TrampolineEntry
@ -555,8 +570,26 @@ __objc_blockTrampolineStart:
TrampolineEntry
TrampolineEntry
TrampolineEntry
// The above is 507 entries.
#if PAGE_MAX_SHIFT == 14
// With 16kB pages, we need (4096*4-32)/8 = 2044 single entries, or
// 511 "quad" entries as above. We add 3 more quad entries, then
// 3 single entries, and finally one single entry labeled Last.
TrampolineEntry
TrampolineEntry
TrampolineEntry
TrampolineEntry1
TrampolineEntry1
TrampolineEntry1
__objc_blockTrampolineLast:
TrampolineEntry1
#else
// With 4kB pages, we need (4096-32)/8 = 508 entries. We have one
// more at the end with the Last label for a total of 508.
__objc_blockTrampolineLast:
TrampolineEntry
#endif
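// Arithmetic check (a sketch, not in the source): each text page holds
// (page_size - 32)/8 entries -- 32 bytes up front from .align 5, 8 bytes
// per entry:
//   4kB:  (4096-32)/8  = 508  = the 507 entries above + Last
//   16kB: (16384-32)/8 = 2044 = 507*4 + 3*4 + 3 + 1
//                               (quads above, 3 quads, 3 singles, Last)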
.text
@ -564,24 +597,39 @@ __objc_blockTrampolineLast:
.globl __objc_blockTrampolineStart_stret
.globl __objc_blockTrampolineLast_stret
.align PAGE_SHIFT
.align PAGE_MAX_SHIFT
__objc_blockTrampolineImpl_stret:
// %rdi -- arg1 -- is address of return value's space. Don't mess with it.
movq (%rsp), %r10 // read return address pushed by TrampolineEntry's callq
movq %rsi, %rdx // arg2 -> arg3
movq -3*PAGE_SIZE-5(%r10), %rsi // block object pointer -> arg2
movq -3*PAGE_MAX_SIZE-5(%r10), %rsi // block object pointer -> arg2
// trampoline is -5 bytes from the return address
// data is -3 pages from the trampoline
ret // back to TrampolineEntry to preserve CPU's return stack
.macro TrampolineEntry_stret
.macro TrampolineEntry_stret1
// This trampoline is 8 bytes long.
// This callq is 5 bytes long.
callq __objc_blockTrampolineImpl_stret
jmp *16(%rsi)
.endmacro
.macro TrampolineEntry_stret4
TrampolineEntry_stret1
TrampolineEntry_stret1
TrampolineEntry_stret1
TrampolineEntry_stret1
.endmacro
#if PAGE_MAX_SHIFT == 12
#define TrampolineEntry_stret TrampolineEntry_stret1
#elif PAGE_MAX_SHIFT == 14
#define TrampolineEntry_stret TrampolineEntry_stret4
#else
#error "unknown PAGE_MAX_SHIFT value"
#endif
.align 5
__objc_blockTrampolineStart_stret:
TrampolineEntry_stret
@ -1091,7 +1139,21 @@ __objc_blockTrampolineStart_stret:
TrampolineEntry_stret
TrampolineEntry_stret
TrampolineEntry_stret
// See the comment on non-stret's Last for why we have additional
// entries here.
#if PAGE_MAX_SHIFT == 14
TrampolineEntry_stret
TrampolineEntry_stret
TrampolineEntry_stret
TrampolineEntry_stret1
TrampolineEntry_stret1
TrampolineEntry_stret1
__objc_blockTrampolineLast_stret:
TrampolineEntry_stret1
#else
__objc_blockTrampolineLast_stret:
TrampolineEntry_stret
#endif
#endif

View File

@ -1795,9 +1795,5 @@ void _class_printMethodCacheStatistics(void)
#endif
void cache_init()
{
}
// !__OBJC2__
#endif

View File

@ -1,23 +0,0 @@
#ifndef _OBJC_CACHE_H
#define _OBJC_CACHE_H
#include "objc-private.h"
__BEGIN_DECLS
extern void cache_init(void);
extern IMP cache_getImp(Class cls, SEL sel);
extern void cache_fill(Class cls, SEL sel, IMP imp, id receiver);
extern void cache_erase_nolock(Class cls);
extern void cache_delete(Class cls);
extern void cache_collect(bool collectALot);
__END_DECLS
#endif

View File

@ -63,14 +63,12 @@
* objc_msgSend*
* cache_getImp
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
* cache_fill (acquires lock)
* cache_expand (only called from cache_fill)
* cache_create (only called from cache_expand)
* bcopy (only called from instrumented cache_expand)
* flush_caches (acquires lock)
* cache_flush (only called from cache_fill and flush_caches)
* cache_collect_free (only called from cache_expand and cache_flush)
* Cache readers/writers (hold cacheUpdateLock during access; not PC-checked)
* cache_t::copyCacheNolock (caller must hold the lock)
* cache_t::eraseNolock (caller must hold the lock)
* cache_t::collectNolock (caller must hold the lock)
* cache_t::insert (acquires lock)
* cache_t::destroy (acquires lock)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
* cache_print
@ -84,21 +82,96 @@
#if __OBJC2__
#include "objc-private.h"
#include "objc-cache.h"
#if TARGET_OS_OSX
#include <Cambria/Traps.h>
#include <Cambria/Cambria.h>
#endif
#if __arm__ || __x86_64__ || __i386__
// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
// Historical fill ratio of 75% (since the new objc runtime was introduced).
static inline mask_t cache_fill_ratio(mask_t capacity) {
return capacity * 3 / 4;
}
#elif __arm64__ && !__LP64__
// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
// Historical fill ratio of 75% (since the new objc runtime was introduced).
static inline mask_t cache_fill_ratio(mask_t capacity) {
return capacity * 3 / 4;
}
#elif __arm64__ && __LP64__
// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
// Allow 87.5% fill ratio in the fast path for all cache sizes.
// Increasing the cache fill ratio reduces fragmentation and wasted space
// in imp-caches, at the cost of potentially longer average selector
// lookups as collision chains grow. It also shifts the moments at which
// cache tables are resized or reset.
static inline mask_t cache_fill_ratio(mask_t capacity) {
return capacity * 7 / 8;
}
// Allow 100% cache utilization for smaller cache sizes. This has the same
// advantages and disadvantages as the fill ratio. A very large percentage
// of caches end up with very few entries, and the worst-case collision
// chains in small tables stay relatively short.
// NOTE: objc_msgSend properly handles a cache lookup with a full cache.
#define CACHE_ALLOW_FULL_UTILIZATION 1
#else
#error unknown architecture
#endif
/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
#if CACHE_END_MARKER || (__arm64__ && !__LP64__)
// When we have a cache end marker it fills a bucket slot, so an
// initial cache size of 2 buckets would not be efficient when one of
// the slots is always filled with the end marker. So start with a
// cache size of 4 buckets.
INIT_CACHE_SIZE_LOG2 = 2,
#else
// Allow an initial bucket size of 2 buckets, since a large number of
// classes, especially metaclasses, have very few imps, and we support
// the ability to fill 100% of the cache before resizing.
INIT_CACHE_SIZE_LOG2 = 1,
#endif
INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2),
MAX_CACHE_SIZE_LOG2 = 16,
MAX_CACHE_SIZE = (1 << MAX_CACHE_SIZE_LOG2),
FULL_UTILIZATION_CACHE_SIZE_LOG2 = 3,
FULL_UTILIZATION_CACHE_SIZE = (1 << FULL_UTILIZATION_CACHE_SIZE_LOG2),
};
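// How these constants feed the resize decision: a simplified sketch (not
// the real function) of the logic in cache_t::insert() later in this file.
static bool needsGrow_sketch(unsigned capacity, unsigned newOccupied) {
    if (newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))
        return false;                   // still under the fill ratio
#if CACHE_ALLOW_FULL_UTILIZATION
    if (capacity <= FULL_UTILIZATION_CACHE_SIZE &&
        newOccupied + CACHE_END_MARKER <= capacity)
        return false;                   // small cache: allow 100% use
#endif
    return true;                        // grow: double, up to MAX_CACHE_SIZE
}
// e.g. on arm64/LP64 a capacity-8 cache grows only past 8 entries and a
// capacity-16 cache past 14 (7/8); on x86_64 a capacity-4 cache grows
// when the third entry arrives (3/4 ratio minus the end-marker slot).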
static void cache_collect_free(struct bucket_t *data, mask_t capacity);
static int _collecting_in_critical(void);
static void _garbage_make_room(void);
#if DEBUG_TASK_THREADS
static kern_return_t objc_task_threads
(
task_t target_task,
thread_act_array_t *act_list,
mach_msg_type_number_t *act_listCnt
);
#endif
#if DEBUG_TASK_THREADS
#undef HAVE_TASK_RESTARTABLE_RANGES
#endif
/***********************************************************************
* Cache statistics for OBJC_PRINT_CACHE_SETUP
@ -159,25 +232,21 @@ asm("\n .section __TEXT,__const"
#endif
);
#if CONFIG_USE_PREOPT_CACHES
__attribute__((used, section("__DATA_CONST,__objc_scoffs")))
uintptr_t objc_opt_offsets[__OBJC_OPT_OFFSETS_COUNT];
#endif
#if __arm__ || __x86_64__ || __i386__
// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
#if CACHE_END_MARKER
static inline mask_t cache_next(mask_t i, mask_t mask) {
return (i+1) & mask;
}
#elif __arm64__
// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
return i ? i-1 : mask;
}
#else
#error unknown architecture
#error unexpected configuration
#endif
@ -237,29 +306,27 @@ ldp(uintptr_t& onep, uintptr_t& twop, const void *srcp)
static inline mask_t cache_hash(SEL sel, mask_t mask)
{
return (mask_t)(uintptr_t)sel & mask;
}
cache_t *getCache(Class cls)
{
ASSERT(cls);
return &cls->cache;
uintptr_t value = (uintptr_t)sel;
#if CONFIG_USE_PREOPT_CACHES
value ^= value >> 7;
#endif
return (mask_t)(value & mask);
}
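// Why the >>7 fold helps (a sketch with made-up selector addresses):
// selector strings sit close together, so low bits alone collide.
//   0x1000 & 7 == 0   and   0x1080 & 7 == 0          // both slot 0
// folding in higher bits separates them:
//   (0x1000 ^ (0x1000 >> 7)) & 7 == 0                // slot 0
//   (0x1080 ^ (0x1080 >> 7)) & 7 == 1                // slot 1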
#if __arm64__
template<Atomicity atomicity, IMPEncoding impEncoding>
void bucket_t::set(SEL newSel, IMP newImp, Class cls)
void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls)
{
ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
_sel.load(memory_order::memory_order_relaxed) == newSel);
ASSERT(_sel.load(memory_order_relaxed) == 0 ||
_sel.load(memory_order_relaxed) == newSel);
static_assert(offsetof(bucket_t,_imp) == 0 &&
offsetof(bucket_t,_sel) == sizeof(void *),
"bucket_t layout doesn't match arm64 bucket_t::set()");
uintptr_t encodedImp = (impEncoding == Encoded
? encodeImp(newImp, newSel, cls)
? encodeImp(base, newImp, newSel, cls)
: (uintptr_t)newImp);
// LDP/STP guarantees that all observers get
@ -270,10 +337,10 @@ void bucket_t::set(SEL newSel, IMP newImp, Class cls)
#else
template<Atomicity atomicity, IMPEncoding impEncoding>
void bucket_t::set(SEL newSel, IMP newImp, Class cls)
void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls)
{
ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 ||
_sel.load(memory_order::memory_order_relaxed) == newSel);
ASSERT(_sel.load(memory_order_relaxed) == 0 ||
_sel.load(memory_order_relaxed) == newSel);
// objc_msgSend uses sel and imp with no locks.
// It is safe for objc_msgSend to see new imp but NULL sel
@ -282,30 +349,196 @@ void bucket_t::set(SEL newSel, IMP newImp, Class cls)
// Therefore we write new imp, wait a lot, then write new sel.
uintptr_t newIMP = (impEncoding == Encoded
? encodeImp(newImp, newSel, cls)
? encodeImp(base, newImp, newSel, cls)
: (uintptr_t)newImp);
if (atomicity == Atomic) {
_imp.store(newIMP, memory_order::memory_order_relaxed);
_imp.store(newIMP, memory_order_relaxed);
if (_sel.load(memory_order::memory_order_relaxed) != newSel) {
if (_sel.load(memory_order_relaxed) != newSel) {
#ifdef __arm__
mega_barrier();
_sel.store(newSel, memory_order::memory_order_relaxed);
_sel.store(newSel, memory_order_relaxed);
#elif __x86_64__ || __i386__
_sel.store(newSel, memory_order::memory_order_release);
_sel.store(newSel, memory_order_release);
#else
#error Don't know how to do bucket_t::set on this architecture.
#endif
}
} else {
_imp.store(newIMP, memory_order::memory_order_relaxed);
_sel.store(newSel, memory_order::memory_order_relaxed);
_imp.store(newIMP, memory_order_relaxed);
_sel.store(newSel, memory_order_relaxed);
}
}
#endif
void cache_t::initializeToEmpty()
{
_bucketsAndMaybeMask.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
_originalPreoptCache.store(nullptr, std::memory_order_relaxed);
}
#if CONFIG_USE_PREOPT_CACHES
/*
* The shared cache builder will sometimes have prebuilt an IMP cache
* for the class and left a `preopt_cache_t` pointer in _originalPreoptCache.
*
* However we have this tension:
* - when the class is realized it has to have a cache that can't resolve any
* selector until the class is properly initialized so that every
* caller falls in the slowpath and synchronizes with the class initializing,
* - we need to remember that cache pointer and we have no space for that.
*
* The caches are designed so that preopt_cache::bit_one is set to 1,
* so we "disguise" the pointer so that it looks like a cache of capacity 1
* where that bit one aliases with where the top bit of a SEL in the bucket_t
* would live:
*
* +----------------+----------------+
* | IMP | SEL | << a bucket_t
* +----------------+----------------+--------------...
* preopt_cache_t >>| 1| ...
* +----------------+--------------...
*
* The shared cache guarantees that there's valid memory to read under "IMP"
*
* This lets us encode the original preoptimized cache pointer during
* initialization, and we can reconstruct its original address and install
* it back later.
*/
void cache_t::initializeToPreoptCacheInDisguise(const preopt_cache_t *cache)
{
// preopt_cache_t::bit_one is 1 which sets the top bit
// and is never set on any valid selector
uintptr_t value = (uintptr_t)cache + sizeof(preopt_cache_t) -
(bucket_t::offsetOfSel() + sizeof(SEL));
_originalPreoptCache.store(nullptr, std::memory_order_relaxed);
setBucketsAndMask((bucket_t *)value, 0);
_occupied = cache->occupied;
}
void cache_t::maybeConvertToPreoptimized()
{
const preopt_cache_t *cache = disguised_preopt_cache();
if (cache == nil) {
return;
}
if (!cls()->allowsPreoptCaches() ||
(cache->has_inlines && !cls()->allowsPreoptInlinedSels())) {
if (PrintCaches) {
_objc_inform("CACHES: %sclass %s: dropping cache (from %s)",
cls()->isMetaClass() ? "meta" : "",
cls()->nameForLogging(), "setInitialized");
}
return setBucketsAndMask(emptyBuckets(), 0);
}
uintptr_t value = (uintptr_t)&cache->entries;
#if __has_feature(ptrauth_calls)
value = (uintptr_t)ptrauth_sign_unauthenticated((void *)value,
ptrauth_key_process_dependent_data, (uintptr_t)cls());
#endif
value |= preoptBucketsHashParams(cache) | preoptBucketsMarker;
_bucketsAndMaybeMask.store(value, memory_order_relaxed);
_occupied = cache->occupied;
}
void cache_t::initializeToEmptyOrPreoptimizedInDisguise()
{
if (os_fastpath(!DisablePreoptCaches)) {
if (!objc::dataSegmentsRanges.inSharedCache((uintptr_t)this)) {
if (dyld_shared_cache_some_image_overridden()) {
// If the system has roots, then we must disable preoptimized
// caches completely. If a class in another image has a
// superclass in the root, the offset to the superclass will
// be wrong. rdar://problem/61601961
cls()->setDisallowPreoptCachesRecursively("roots");
}
return initializeToEmpty();
}
auto cache = _originalPreoptCache.load(memory_order_relaxed);
if (cache) {
return initializeToPreoptCacheInDisguise(cache);
}
}
return initializeToEmpty();
}
const preopt_cache_t *cache_t::preopt_cache() const
{
auto addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
addr &= preoptBucketsMask;
#if __has_feature(ptrauth_calls)
#if __BUILDING_OBJCDT__
addr = (uintptr_t)ptrauth_strip((preopt_cache_entry_t *)addr,
ptrauth_key_process_dependent_data);
#else
addr = (uintptr_t)ptrauth_auth_data((preopt_cache_entry_t *)addr,
ptrauth_key_process_dependent_data, (uintptr_t)cls());
#endif
#endif
return (preopt_cache_t *)(addr - sizeof(preopt_cache_t));
}
const preopt_cache_t *cache_t::disguised_preopt_cache() const
{
bucket_t *b = buckets();
if ((intptr_t)b->sel() >= 0) return nil;
uintptr_t value = (uintptr_t)b + bucket_t::offsetOfSel() + sizeof(SEL);
return (preopt_cache_t *)(value - sizeof(preopt_cache_t));
}
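// The two functions above are exact inverses; the disguise round-trips
// (a sketch with an arbitrary cacheAddr):
//   disguised = cacheAddr + sizeof(preopt_cache_t)
//             - (bucket_t::offsetOfSel() + sizeof(SEL));   // encode
//   value     = disguised + bucket_t::offsetOfSel() + sizeof(SEL);
//   value - sizeof(preopt_cache_t) == cacheAddr            // decode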
Class cache_t::preoptFallbackClass() const
{
return (Class)((uintptr_t)cls() + preopt_cache()->fallback_class_offset);
}
bool cache_t::isConstantOptimizedCache(bool strict, uintptr_t empty_addr) const
{
uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
if (addr & preoptBucketsMarker) {
return true;
}
if (strict) {
return false;
}
return mask() == 0 && addr != empty_addr;
}
bool cache_t::shouldFlush(SEL sel, IMP imp) const
{
// This test isn't backwards: disguised caches aren't "strict"
// constant optimized caches
if (!isConstantOptimizedCache(/*strict*/true)) {
const preopt_cache_t *cache = disguised_preopt_cache();
if (cache) {
uintptr_t offs = (uintptr_t)sel - (uintptr_t)@selector(🤯);
uintptr_t slot = ((offs >> cache->shift) & cache->mask);
auto &entry = cache->entries[slot];
return entry.sel_offs == offs &&
(uintptr_t)cls() - entry.imp_offs ==
(uintptr_t)ptrauth_strip(imp, ptrauth_key_function_pointer);
}
}
return cache_getImp(cls(), sel) == imp;
}
bool cache_t::isConstantOptimizedCacheWithInlinedSels() const
{
return isConstantOptimizedCache(/* strict */true) && preopt_cache()->has_inlines;
}
#endif // CONFIG_USE_PREOPT_CACHES
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
@ -321,47 +554,31 @@ void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
// ensure other threads see buckets contents before buckets pointer
mega_barrier();
_buckets.store(newBuckets, memory_order::memory_order_relaxed);
_bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_relaxed);
// ensure other threads see new buckets before new mask
mega_barrier();
_mask.store(newMask, memory_order::memory_order_relaxed);
_maybeMask.store(newMask, memory_order_relaxed);
_occupied = 0;
#elif __x86_64__ || __i386__
// ensure other threads see buckets contents before buckets pointer
_buckets.store(newBuckets, memory_order::memory_order_release);
_bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_release);
// ensure other threads see new buckets before new mask
_mask.store(newMask, memory_order::memory_order_release);
_maybeMask.store(newMask, memory_order_release);
_occupied = 0;
#else
#error Don't know how to do setBucketsAndMask on this architecture.
#endif
}
struct bucket_t *cache_t::emptyBuckets()
mask_t cache_t::mask() const
{
return (bucket_t *)&_objc_empty_cache;
return _maybeMask.load(memory_order_relaxed);
}
struct bucket_t *cache_t::buckets()
{
return _buckets.load(memory_order::memory_order_relaxed);
}
mask_t cache_t::mask()
{
return _mask.load(memory_order::memory_order_relaxed);
}
void cache_t::initializeToEmpty()
{
bzero(this, sizeof(*this));
_buckets.store((bucket_t *)&_objc_empty_cache, memory_order::memory_order_relaxed);
}
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
@ -371,33 +588,16 @@ void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
ASSERT(buckets <= bucketsMask);
ASSERT(mask <= maxMask);
_maskAndBuckets.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, std::memory_order_relaxed);
_bucketsAndMaybeMask.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, memory_order_relaxed);
_occupied = 0;
}
struct bucket_t *cache_t::emptyBuckets()
mask_t cache_t::mask() const
{
return (bucket_t *)&_objc_empty_cache;
}
struct bucket_t *cache_t::buckets()
{
uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
return (bucket_t *)(maskAndBuckets & bucketsMask);
}
mask_t cache_t::mask()
{
uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
return maskAndBuckets >> maskShift;
}
void cache_t::initializeToEmpty()
{
bzero(this, sizeof(*this));
_maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
}
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
@ -408,48 +608,31 @@ void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
ASSERT(buckets == (buckets & bucketsMask));
ASSERT(mask <= 0xffff);
// The shift amount is equal to the number of leading zeroes in
// the last 16 bits of mask. Count all the leading zeroes, then
// subtract to ignore the top half.
uintptr_t maskShift = __builtin_clz(mask) - (sizeof(mask) * CHAR_BIT - 16);
ASSERT(mask == (0xffff >> maskShift));
_maskAndBuckets.store(buckets | maskShift, memory_order::memory_order_relaxed);
_bucketsAndMaybeMask.store(buckets | objc::mask16ShiftBits(mask), memory_order_relaxed);
_occupied = 0;
ASSERT(this->buckets() == newBuckets);
ASSERT(this->mask() == newMask);
}
struct bucket_t *cache_t::emptyBuckets()
mask_t cache_t::mask() const
{
return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask);
}
struct bucket_t *cache_t::buckets()
{
uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
return (bucket_t *)(maskAndBuckets & bucketsMask);
}
mask_t cache_t::mask()
{
uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed);
uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
uintptr_t maskShift = (maskAndBuckets & maskMask);
return 0xffff >> maskShift;
}
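// LOW_4 stores a 4-bit shift, not the mask itself; the mask is always
// 0xffff >> shift. Sketch of the encode side (what mask16ShiftBits does,
// per the removed open-coded version above):
//   mask 0xffff -> shift 0     mask 0x7fff -> shift 1
//   mask 0x000f -> shift 12    (i.e. __builtin_clz(mask) - 16)
// This is why the buckets pointer must keep its low 4 bits clear.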
void cache_t::initializeToEmpty()
{
bzero(this, sizeof(*this));
_maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed);
}
#else
#error Unknown cache mask storage type.
#endif
mask_t cache_t::occupied()
struct bucket_t *cache_t::buckets() const
{
uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed);
return (bucket_t *)(addr & bucketsMask);
}
mask_t cache_t::occupied() const
{
return _occupied;
}
@ -459,11 +642,15 @@ void cache_t::incrementOccupied()
_occupied++;
}
unsigned cache_t::capacity()
unsigned cache_t::capacity() const
{
return mask() ? mask()+1 : 0;
}
Class cache_t::cls() const
{
return (Class)((uintptr_t)this - offsetof(objc_class, cache));
}
size_t cache_t::bytesForCapacity(uint32_t cap)
{
@ -477,22 +664,21 @@ bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1;
}
bucket_t *allocateBuckets(mask_t newCapacity)
bucket_t *cache_t::allocateBuckets(mask_t newCapacity)
{
// Allocate one extra bucket to mark the end of the list.
// This can't overflow mask_t because newCapacity is a power of 2.
bucket_t *newBuckets = (bucket_t *)
calloc(cache_t::bytesForCapacity(newCapacity), 1);
bucket_t *newBuckets = (bucket_t *)calloc(bytesForCapacity(newCapacity), 1);
bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);
bucket_t *end = endMarker(newBuckets, newCapacity);
#if __arm__
// End marker's sel is 1 and imp points BEFORE the first bucket.
// This saves an instruction in objc_msgSend.
end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
end->set<NotAtomic, Raw>(newBuckets, (SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
#else
// End marker's sel is 1 and imp points to the first bucket.
end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)newBuckets, nil);
end->set<NotAtomic, Raw>(newBuckets, (SEL)(uintptr_t)1, (IMP)newBuckets, nil);
#endif
if (PrintCaches) recordNewCache(newCapacity);
@ -502,17 +688,21 @@ bucket_t *allocateBuckets(mask_t newCapacity)
#else
bucket_t *allocateBuckets(mask_t newCapacity)
bucket_t *cache_t::allocateBuckets(mask_t newCapacity)
{
if (PrintCaches) recordNewCache(newCapacity);
return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1);
return (bucket_t *)calloc(bytesForCapacity(newCapacity), 1);
}
#endif
struct bucket_t *cache_t::emptyBuckets()
{
return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask);
}
bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
bucket_t *cache_t::emptyBucketsForCapacity(mask_t capacity, bool allocate)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
@ -520,11 +710,11 @@ bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
runtimeLock.assertLocked();
#endif
size_t bytes = cache_t::bytesForCapacity(capacity);
size_t bytes = bytesForCapacity(capacity);
// Use _objc_empty_cache if the buckets is small enough.
if (bytes <= EMPTY_BYTES) {
return cache_t::emptyBuckets();
return emptyBuckets();
}
// Use shared empty buckets allocated on the heap.
@ -556,17 +746,16 @@ bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
return emptyBucketsList[index];
}
bool cache_t::isConstantEmptyCache()
bool cache_t::isConstantEmptyCache() const
{
return
occupied() == 0 &&
buckets() == emptyBucketsForCapacity(capacity(), false);
}
bool cache_t::canBeFreed()
bool cache_t::canBeFreed() const
{
return !isConstantEmptyCache();
return !isConstantEmptyCache() && !isConstantOptimizedCache();
}
ALWAYS_INLINE
@ -585,68 +774,79 @@ void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld)
setBucketsAndMask(newBuckets, newCapacity - 1);
if (freeOld) {
cache_collect_free(oldBuckets, oldCapacity);
collect_free(oldBuckets, oldCapacity);
}
}
void cache_t::bad_cache(id receiver, SEL sel, Class isa)
void cache_t::bad_cache(id receiver, SEL sel)
{
// Log in separate steps in case the logging itself causes a crash.
_objc_inform_now_and_on_crash
("Method cache corrupted. This may be a message to an "
"invalid object, or a memory error somewhere else.");
cache_t *cache = &isa->cache;
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
bucket_t *buckets = cache->_buckets.load(memory_order::memory_order_relaxed);
bucket_t *b = buckets();
_objc_inform_now_and_on_crash
("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
"mask 0x%x, occupied 0x%x",
receiver ? "receiver" : "unused", receiver,
sel, isa, cache, buckets,
cache->_mask.load(memory_order::memory_order_relaxed),
cache->_occupied);
sel, cls(), this, b,
_maybeMask.load(memory_order_relaxed),
_occupied);
_objc_inform_now_and_on_crash
("%s %zu bytes, buckets %zu bytes",
receiver ? "receiver" : "unused", malloc_size(receiver),
malloc_size(buckets));
malloc_size(b));
#elif (CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || \
CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS || \
CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4)
uintptr_t maskAndBuckets = cache->_maskAndBuckets.load(memory_order::memory_order_relaxed);
uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed);
_objc_inform_now_and_on_crash
("%s %p, SEL %p, isa %p, cache %p, buckets and mask 0x%lx, "
"occupied 0x%x",
receiver ? "receiver" : "unused", receiver,
sel, isa, cache, maskAndBuckets,
cache->_occupied);
sel, cls(), this, maskAndBuckets, _occupied);
_objc_inform_now_and_on_crash
("%s %zu bytes, buckets %zu bytes",
receiver ? "receiver" : "unused", malloc_size(receiver),
malloc_size(cache->buckets()));
malloc_size(buckets()));
#else
#error Unknown cache mask storage type.
#endif
_objc_inform_now_and_on_crash
("selector '%s'", sel_getName(sel));
_objc_inform_now_and_on_crash
("isa '%s'", isa->nameForLogging());
("isa '%s'", cls()->nameForLogging());
_objc_fatal
("Method cache corrupted. This may be a message to an "
"invalid object, or a memory error somewhere else.");
}
ALWAYS_INLINE
void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
void cache_t::insert(SEL sel, IMP imp, id receiver)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
#else
runtimeLock.assertLocked();
// Never cache before +initialize is done
if (slowpath(!cls()->isInitialized())) {
return;
}
if (isConstantOptimizedCache()) {
_objc_fatal("cache_t::insert() called with a preoptimized cache for %s",
cls()->nameForLogging());
}
#if DEBUG_TASK_THREADS
return _collecting_in_critical();
#else
#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
#endif
ASSERT(sel != 0 && cls->isInitialized());
ASSERT(sel != 0 && cls()->isInitialized());
// Use the cache as-is if it is less than 3/4 full
// Use the cache as-is until we exceed our expected fill ratio.
mask_t newOccupied = occupied() + 1;
unsigned oldCapacity = capacity(), capacity = oldCapacity;
if (slowpath(isConstantEmptyCache())) {
@ -654,9 +854,14 @@ void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
if (!capacity) capacity = INIT_CACHE_SIZE;
reallocate(oldCapacity, capacity, /* freeOld */false);
}
else if (fastpath(newOccupied + CACHE_END_MARKER <= capacity / 4 * 3)) {
// Cache is less than 3/4 full. Use it as-is.
else if (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) {
// Cache is less than 3/4 or 7/8 full. Use it as-is.
}
#if CACHE_ALLOW_FULL_UTILIZATION
else if (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) {
// Allow 100% cache utilization for small buckets. Use it as-is.
}
#endif
else {
capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
if (capacity > MAX_CACHE_SIZE) {
@ -671,12 +876,11 @@ void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
mask_t i = begin;
// Scan for the first unused slot and insert there.
// There is guaranteed to be an empty slot because the
// minimum size is 4 and we resized at 3/4 full.
// There is guaranteed to be an empty slot.
do {
if (fastpath(b[i].sel() == 0)) {
incrementOccupied();
b[i].set<Atomic, Encoded>(sel, imp, cls);
b[i].set<Atomic, Encoded>(b, sel, imp, cls());
return;
}
if (b[i].sel() == sel) {
@ -686,31 +890,54 @@ void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
}
} while (fastpath((i = cache_next(i, m)) != begin));
cache_t::bad_cache(receiver, (SEL)sel, cls);
bad_cache(receiver, (SEL)sel);
#endif // !DEBUG_TASK_THREADS
}
void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
void cache_t::copyCacheNolock(objc_imp_cache_entry *buffer, int len)
{
runtimeLock.assertLocked();
#if !DEBUG_TASK_THREADS
// Never cache before +initialize is done
if (cls->isInitialized()) {
cache_t *cache = getCache(cls);
#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
#endif
cache->insert(cls, sel, imp, receiver);
}
cacheUpdateLock.assertLocked();
#else
_collecting_in_critical();
runtimeLock.assertLocked();
#endif
}
int wpos = 0;
#if CONFIG_USE_PREOPT_CACHES
if (isConstantOptimizedCache()) {
auto cache = preopt_cache();
auto mask = cache->mask;
uintptr_t sel_base = objc_opt_offsets[OBJC_OPT_METHODNAME_START];
uintptr_t imp_base = (uintptr_t)&cache->entries;
for (uintptr_t index = 0; index <= mask && wpos < len; index++) {
auto &ent = cache->entries[index];
if (~ent.sel_offs) {
buffer[wpos].sel = (SEL)(sel_base + ent.sel_offs);
buffer[wpos].imp = (IMP)(imp_base - ent.imp_offs);
wpos++;
}
}
return;
}
#endif
{
bucket_t *buckets = this->buckets();
uintptr_t count = capacity();
for (uintptr_t index = 0; index < count && wpos < len; index++) {
if (buckets[index].sel()) {
buffer[wpos].imp = buckets[index].imp(buckets, cls());
buffer[wpos].sel = buckets[index].sel();
wpos++;
}
}
}
}
// Reset this entire cache to the uncached lookup by reallocating it.
// This must not shrink the cache - that breaks the lock-free scheme.
void cache_erase_nolock(Class cls)
void cache_t::eraseNolock(const char *func)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
@ -718,29 +945,36 @@ void cache_erase_nolock(Class cls)
runtimeLock.assertLocked();
#endif
cache_t *cache = getCache(cls);
mask_t capacity = cache->capacity();
if (capacity > 0 && cache->occupied() > 0) {
auto oldBuckets = cache->buckets();
if (isConstantOptimizedCache()) {
auto c = cls();
if (PrintCaches) {
_objc_inform("CACHES: %sclass %s: dropping and disallowing preopt cache (from %s)",
c->isMetaClass() ? "meta" : "",
c->nameForLogging(), func);
}
setBucketsAndMask(emptyBuckets(), 0);
c->setDisallowPreoptCaches();
} else if (occupied() > 0) {
auto capacity = this->capacity();
auto oldBuckets = buckets();
auto buckets = emptyBucketsForCapacity(capacity);
cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied
cache_collect_free(oldBuckets, capacity);
setBucketsAndMask(buckets, capacity - 1); // also clears occupied
collect_free(oldBuckets, capacity);
}
}
void cache_delete(Class cls)
void cache_t::destroy()
{
#if CONFIG_USE_CACHE_LOCK
mutex_locker_t lock(cacheUpdateLock);
#else
runtimeLock.assertLocked();
#endif
if (cls->cache.canBeFreed()) {
if (PrintCaches) recordDeadCache(cls->cache.capacity());
free(cls->cache.buckets());
if (canBeFreed()) {
if (PrintCaches) recordDeadCache(capacity());
free(buckets());
}
}
@ -817,7 +1051,7 @@ extern "C" task_restartable_range_t objc_restartableRanges[];
static bool shouldUseRestartableRanges = true;
#endif
void cache_init()
void cache_t::init()
{
#if HAVE_TASK_RESTARTABLE_RANGES
mach_msg_type_number_t count = 0;
@ -883,7 +1117,18 @@ static int _collecting_in_critical(void)
continue;
// Find out where thread is executing
#if TARGET_OS_OSX
if (oah_is_current_process_translated()) {
kern_return_t ret = objc_thread_get_rip(threads[count], (uint64_t*)&pc);
if (ret != KERN_SUCCESS) {
pc = PC_SENTINEL;
}
} else {
pc = _get_pc_for_thread (threads[count]);
}
#else
pc = _get_pc_for_thread (threads[count]);
#endif
// Check for bad status, and if so, assume the worse (can't collect)
if (pc == PC_SENTINEL)
@ -968,13 +1213,13 @@ static void _garbage_make_room(void)
/***********************************************************************
* cache_collect_free. Add the specified malloc'd memory to the list
* cache_t::collect_free. Add the specified malloc'd memory to the list
* of them to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
static void cache_collect_free(bucket_t *data, mask_t capacity)
void cache_t::collect_free(bucket_t *data, mask_t capacity)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
@ -987,7 +1232,7 @@ static void cache_collect_free(bucket_t *data, mask_t capacity)
_garbage_make_room ();
garbage_byte_size += cache_t::bytesForCapacity(capacity);
garbage_refs[garbage_count++] = data;
cache_collect(false);
cache_t::collectNolock(false);
}
@ -996,7 +1241,7 @@ static void cache_collect_free(bucket_t *data, mask_t capacity)
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
void cache_collect(bool collectALot)
void cache_t::collectNolock(bool collectALot)
{
#if CONFIG_USE_CACHE_LOCK
cacheUpdateLock.assertLocked();
@ -1293,6 +1538,41 @@ static kern_return_t objc_task_threads
// DEBUG_TASK_THREADS
#endif
OBJC_EXPORT bucket_t * objc_cache_buckets(const cache_t * cache) {
return cache->buckets();
}
#if CONFIG_USE_PREOPT_CACHES
OBJC_EXPORT const preopt_cache_t * _Nonnull objc_cache_preoptCache(const cache_t * _Nonnull cache) {
return cache->preopt_cache();
}
OBJC_EXPORT bool objc_cache_isConstantOptimizedCache(const cache_t * _Nonnull cache, bool strict, uintptr_t empty_addr) {
return cache->isConstantOptimizedCache(strict, empty_addr);
}
OBJC_EXPORT unsigned objc_cache_preoptCapacity(const cache_t * _Nonnull cache) {
return cache->preopt_cache()->capacity();
}
OBJC_EXPORT Class _Nonnull objc_cache_preoptFallbackClass(const cache_t * _Nonnull cache) {
return cache->preoptFallbackClass();
}
#endif
OBJC_EXPORT size_t objc_cache_bytesForCapacity(uint32_t cap) {
return cache_t::bytesForCapacity(cap);
}
OBJC_EXPORT uint32_t objc_cache_occupied(const cache_t * _Nonnull cache) {
return cache->occupied();
}
OBJC_EXPORT unsigned objc_cache_capacity(const struct cache_t * _Nonnull cache) {
return cache->capacity();
}
// __OBJC2__
#endif

View File

@ -336,7 +336,7 @@ static void _class_resolveClassMethod(id inst, SEL sel, Class cls)
ASSERT(cls->isMetaClass());
SEL resolve_sel = @selector(resolveClassMethod:);
if (!lookUpImpOrNil(inst, resolve_sel, cls)) {
if (!lookUpImpOrNilTryCache(inst, resolve_sel, cls)) {
// Resolver not implemented.
return;
}
@ -346,7 +346,7 @@ static void _class_resolveClassMethod(id inst, SEL sel, Class cls)
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveClassMethod adds to self->ISA() a.k.a. cls
IMP imp = lookUpImpOrNil(inst, sel, cls);
IMP imp = lookUpImpOrNilTryCache(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
_objc_inform("RESOLVE: method %c[%s %s] "
@ -376,7 +376,7 @@ static void _class_resolveInstanceMethod(id inst, SEL sel, Class cls)
{
SEL resolve_sel = @selector(resolveInstanceMethod:);
if (! lookUpImpOrNil(cls, resolve_sel, cls->ISA())) {
if (! lookUpImpOrNilTryCache(cls, resolve_sel, cls->ISA())) {
// Resolver not implemented.
return;
}
@ -386,7 +386,7 @@ static void _class_resolveInstanceMethod(id inst, SEL sel, Class cls)
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveInstanceMethod adds to self a.k.a. cls
IMP imp = lookUpImpOrNil(inst, sel, cls);
IMP imp = lookUpImpOrNilTryCache(inst, sel, cls);
if (resolved && PrintResolving) {
if (imp) {
@ -424,7 +424,7 @@ _class_resolveMethod(id inst, SEL sel, Class cls)
// try [nonMetaClass resolveClassMethod:sel]
// and [cls resolveInstanceMethod:sel]
_class_resolveClassMethod(inst, sel, cls);
if (!lookUpImpOrNil(inst, sel, cls)) {
if (!lookUpImpOrNilTryCache(inst, sel, cls)) {
_class_resolveInstanceMethod(inst, sel, cls);
}
}
@ -2593,8 +2593,7 @@ id object_reallocFromZone(id obj, size_t nBytes, void *z)
void *object_getIndexedIvars(id obj)
{
// ivars are tacked onto the end of the object
if (!obj) return nil;
if (obj->isTaggedPointer()) return nil;
if (obj->isTaggedPointerOrNil()) return nil;
return ((char *) obj) + obj->ISA()->alignedInstanceSize();
}

View File

@ -159,6 +159,9 @@
#include "objc-private.h"
#include "objc-abi.h"
#include <objc/message.h>
#if !TARGET_OS_WIN32
#include <os/linker_set.h>
#endif
/***********************************************************************
* Information about multi-thread support:
@ -195,9 +198,9 @@ Class object_setClass(id obj, Class cls)
// weakly-referenced object has an un-+initialized isa.
// Unresolved future classes are not so protected.
if (!cls->isFuture() && !cls->isInitialized()) {
// use lookUpImpOrNil to indirectly provoke +initialize
// use lookUpImpOrNilTryCache to indirectly provoke +initialize
// to avoid duplicating the code to actually send +initialize
lookUpImpOrNil(nil, @selector(initialize), cls, LOOKUP_INITIALIZE);
lookUpImpOrNilTryCache(nil, @selector(initialize), cls, LOOKUP_INITIALIZE);
}
return obj->changeIsa(cls);
@ -281,7 +284,7 @@ _class_lookUpIvar(Class cls, Ivar ivar, ptrdiff_t& ivarOffset,
// Preflight the hasAutomaticIvars check
// because _class_getClassForIvar() may need to take locks.
bool hasAutomaticIvars = NO;
for (Class c = cls; c; c = c->superclass) {
for (Class c = cls; c; c = c->getSuperclass()) {
if (c->hasAutomaticIvars()) {
hasAutomaticIvars = YES;
break;
@ -337,7 +340,7 @@ _class_getIvarMemoryManagement(Class cls, Ivar ivar)
static ALWAYS_INLINE
void _object_setIvar(id obj, Ivar ivar, id value, bool assumeStrong)
{
if (!obj || !ivar || obj->isTaggedPointer()) return;
if (!ivar || obj->isTaggedPointerOrNil()) return;
ptrdiff_t offset;
objc_ivar_memory_management_t memoryManagement;
@ -371,7 +374,7 @@ void object_setIvarWithStrongDefault(id obj, Ivar ivar, id value)
id object_getIvar(id obj, Ivar ivar)
{
if (!obj || !ivar || obj->isTaggedPointer()) return nil;
if (!ivar || obj->isTaggedPointerOrNil()) return nil;
ptrdiff_t offset;
objc_ivar_memory_management_t memoryManagement;
@ -393,7 +396,7 @@ Ivar _object_setInstanceVariable(id obj, const char *name, void *value,
{
Ivar ivar = nil;
if (obj && name && !obj->isTaggedPointer()) {
if (name && !obj->isTaggedPointerOrNil()) {
if ((ivar = _class_getVariable(obj->ISA(), name))) {
_object_setIvar(obj, ivar, (id)value, assumeStrong);
}
@ -415,7 +418,7 @@ Ivar object_setInstanceVariableWithStrongDefault(id obj, const char *name,
Ivar object_getInstanceVariable(id obj, const char *name, void **value)
{
if (obj && name && !obj->isTaggedPointer()) {
if (name && !obj->isTaggedPointerOrNil()) {
Ivar ivar;
if ((ivar = class_getInstanceVariable(obj->ISA(), name))) {
if (value) *value = (void *)object_getIvar(obj, ivar);
@ -440,7 +443,7 @@ static void object_cxxDestructFromClass(id obj, Class cls)
// Call cls's dtor first, then superclasses's dtors.
for ( ; cls; cls = cls->superclass) {
for ( ; cls; cls = cls->getSuperclass()) {
if (!cls->hasCxxDtor()) return;
dtor = (void(*)(id))
lookupMethodInClassAndLoadCache(cls, SEL_cxx_destruct);
@ -462,8 +465,7 @@ static void object_cxxDestructFromClass(id obj, Class cls)
**********************************************************************/
void object_cxxDestruct(id obj)
{
if (!obj) return;
if (obj->isTaggedPointer()) return;
if (obj->isTaggedPointerOrNil()) return;
object_cxxDestructFromClass(obj, obj->ISA());
}
@ -491,7 +493,7 @@ object_cxxConstructFromClass(id obj, Class cls, int flags)
id (*ctor)(id);
Class supercls;
supercls = cls->superclass;
supercls = cls->getSuperclass();
// Call superclasses' ctors first, if any.
if (supercls && supercls->hasCxxCtor()) {
@ -510,7 +512,7 @@ object_cxxConstructFromClass(id obj, Class cls, int flags)
}
if (fastpath((*ctor)(obj))) return obj; // ctor called and succeeded - ok
supercls = cls->superclass; // this reload avoids a spill on the stack
supercls = cls->getSuperclass(); // this reload avoids a spill on the stack
// This class's ctor was called and failed.
// Call superclasses's dtors to clean up.
@ -530,7 +532,7 @@ object_cxxConstructFromClass(id obj, Class cls, int flags)
**********************************************************************/
void fixupCopiedIvars(id newObject, id oldObject)
{
for (Class cls = oldObject->ISA(); cls; cls = cls->superclass) {
for (Class cls = oldObject->ISA(); cls; cls = cls->getSuperclass()) {
if (cls->hasAutomaticIvars()) {
// Use alignedInstanceStart() because unaligned bytes at the start
// of this class's ivars are not represented in the layout bitmap.
@ -636,12 +638,12 @@ BOOL class_respondsToSelector(Class cls, SEL sel)
// inst is an instance of cls or a subclass thereof, or nil if none is known.
// Non-nil inst is faster in some cases. See lookUpImpOrForward() for details.
NEVER_INLINE BOOL
NEVER_INLINE __attribute__((flatten)) BOOL
class_respondsToSelector_inst(id inst, SEL sel, Class cls)
{
// Avoids +initialize because it historically did so.
// We're not returning a callable IMP anyway.
return sel && cls && lookUpImpOrNil(inst, sel, cls, LOOKUP_RESOLVER);
return sel && cls && lookUpImpOrNilTryCache(inst, sel, cls, LOOKUP_RESOLVER);
}
@ -662,13 +664,16 @@ IMP class_lookupMethod(Class cls, SEL sel)
return class_getMethodImplementation(cls, sel);
}
__attribute__((flatten))
IMP class_getMethodImplementation(Class cls, SEL sel)
{
IMP imp;
if (!cls || !sel) return nil;
imp = lookUpImpOrNil(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER);
lockdebug_assert_no_locks_locked_except({ &loadMethodLock });
imp = lookUpImpOrNilTryCache(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER);
// Translate forwarding function to C-callable external version
if (!imp) {
@ -775,7 +780,7 @@ Class _calloc_class(size_t size)
Class class_getSuperclass(Class cls)
{
if (!cls) return nil;
return cls->superclass;
return cls->getSuperclass();
}
BOOL class_isMetaClass(Class cls)
@ -886,6 +891,15 @@ inform_duplicate(const char *name, Class oldCls, Class newCls)
const header_info *newHeader = _headerForClass(newCls);
const char *oldName = oldHeader ? oldHeader->fname() : "??";
const char *newName = newHeader ? newHeader->fname() : "??";
const objc_duplicate_class **_dupi = NULL;
LINKER_SET_FOREACH(_dupi, const objc_duplicate_class **, "__objc_dupclass") {
const objc_duplicate_class *dupi = *_dupi;
if (strcmp(dupi->name, name) == 0) {
return;
}
}
(DebugDuplicateClasses ? _objc_fatal : _objc_inform)
("Class %s is implemented in both %s (%p) and %s (%p). "

View File

@ -26,15 +26,6 @@
#include <TargetConditionals.h>
// Define __OBJC2__ for the benefit of our asm files.
#ifndef __OBJC2__
# if TARGET_OS_OSX && !TARGET_OS_IOSMAC && __i386__
// old ABI
# else
# define __OBJC2__ 1
# endif
#endif
// Avoid the !NDEBUG double negative.
#if !NDEBUG
# define DEBUG 1
@ -51,7 +42,7 @@
#endif
// Define SUPPORT_ZONES=1 to enable malloc zone support in NXHashTable.
#if !(TARGET_OS_OSX || TARGET_OS_IOSMAC)
#if !(TARGET_OS_OSX || TARGET_OS_MACCATALYST)
# define SUPPORT_ZONES 0
#else
# define SUPPORT_ZONES 1
@ -73,7 +64,7 @@
// Define SUPPORT_TAGGED_POINTERS=1 to enable tagged pointer objects
// Be sure to edit tagged pointer SPI in objc-internal.h as well.
#if !(__OBJC2__ && __LP64__)
#if !__LP64__
# define SUPPORT_TAGGED_POINTERS 0
#else
# define SUPPORT_TAGGED_POINTERS 1
@ -82,7 +73,7 @@
// Define SUPPORT_MSB_TAGGED_POINTERS to use the MSB
// as the tagged pointer marker instead of the LSB.
// Be sure to edit tagged pointer SPI in objc-internal.h as well.
#if !SUPPORT_TAGGED_POINTERS || (TARGET_OS_OSX || TARGET_OS_IOSMAC)
#if !SUPPORT_TAGGED_POINTERS || ((TARGET_OS_OSX || TARGET_OS_MACCATALYST) && __x86_64__)
# define SUPPORT_MSB_TAGGED_POINTERS 0
#else
# define SUPPORT_MSB_TAGGED_POINTERS 1
@ -101,7 +92,7 @@
// Define SUPPORT_PACKED_ISA=1 on platforms that store the class in the isa
// field as a maskable pointer with other data around it.
#if (!__LP64__ || TARGET_OS_WIN32 || \
(TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC))
(TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__))
# define SUPPORT_PACKED_ISA 0
#else
# define SUPPORT_PACKED_ISA 1
@ -126,7 +117,7 @@
// Define SUPPORT_ZEROCOST_EXCEPTIONS to use "zero-cost" exceptions for OBJC2.
// Be sure to edit objc-exception.h as well (objc_add/removeExceptionHandler)
#if !__OBJC2__ || (defined(__arm__) && __USING_SJLJ_EXCEPTIONS__)
#if defined(__arm__) && __USING_SJLJ_EXCEPTIONS__
# define SUPPORT_ZEROCOST_EXCEPTIONS 0
#else
# define SUPPORT_ZEROCOST_EXCEPTIONS 1
@ -162,9 +153,16 @@
# define SUPPORT_MESSAGE_LOGGING 1
#endif
// Define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS to combine consecutive pointers to the same object in autorelease pools
#if !__LP64__
# define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS 0
#else
# define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS 1
#endif
// Define HAVE_TASK_RESTARTABLE_RANGES to enable usage of
// task_restartable_ranges_synchronize()
#if TARGET_OS_SIMULATOR || defined(__i386__) || defined(__arm__) || !TARGET_OS_MAC || defined(DARLING)
#if TARGET_OS_SIMULATOR || defined(__i386__) || defined(__arm__) || !TARGET_OS_MAC
# define HAVE_TASK_RESTARTABLE_RANGES 0
#else
# define HAVE_TASK_RESTARTABLE_RANGES 1
@ -178,16 +176,12 @@
// because objc-class.h is public and objc-config.h is not.
//#define OBJC_INSTRUMENTED
// In __OBJC2__, the runtimeLock is a mutex always held
// hence the cache lock is redundant and can be elided.
// The runtimeLock is a mutex that is always held, hence the cache lock
// is redundant and can be elided.
//
// If the runtime lock ever becomes a rwlock again,
// the cache lock would need to be used again
#if __OBJC2__
#define CONFIG_USE_CACHE_LOCK 0
#else
#define CONFIG_USE_CACHE_LOCK 1
#endif
// Determine how the method cache stores IMPs.
#define CACHE_IMP_ENCODING_NONE 1 // Method cache contains raw IMP.
@ -208,13 +202,75 @@
#define CACHE_MASK_STORAGE_OUTLINED 1
#define CACHE_MASK_STORAGE_HIGH_16 2
#define CACHE_MASK_STORAGE_LOW_4 3
#define CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS 4
#if defined(__arm64__) && __LP64__
#if TARGET_OS_OSX || TARGET_OS_SIMULATOR
#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
#else
#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_HIGH_16
#endif
#elif defined(__arm64__) && !__LP64__
#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_LOW_4
#else
#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_OUTLINED
#endif
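// Reference sketch of the HIGH_16 packing (a maskShift of 48 is assumed;
// the BIG_ADDRS variant covers macOS/simulator arm64, where bucket
// addresses can need more bits than the plain layout leaves them):
//     uintptr_t packed  = ((uintptr_t)mask << 48) | (uintptr_t)buckets;
//     mask_t    mask    = packed >> 48;
//     bucket_t *buckets = (bucket_t *)(packed & ((1ULL << 48) - 1));
// LOW_4 instead stores a 4-bit shift in the pointer's low bits, and
// OUTLINED keeps mask and buckets in separate words.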
// Constants used for signing/authing isas. This doesn't quite belong
// here, but the asm files can't import other headers.
#define ISA_SIGNING_DISCRIMINATOR 0x6AE1
#define ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS 0xB5AB
#define ISA_SIGNING_KEY ptrauth_key_process_independent_data
// ISA signing authentication modes. Set ISA_SIGNING_AUTH_MODE to one
// of these to choose how ISAs are authenticated.
#define ISA_SIGNING_STRIP 1 // Strip the signature whenever reading an ISA.
#define ISA_SIGNING_AUTH 2 // Authenticate the signature on all ISAs.
// ISA signing modes. Set ISA_SIGNING_SIGN_MODE to one of these to
// choose how ISAs are signed.
#define ISA_SIGNING_SIGN_NONE 1 // Sign no ISAs.
#define ISA_SIGNING_SIGN_ONLY_SWIFT 2 // Only sign ISAs of Swift objects.
#define ISA_SIGNING_SIGN_ALL 3 // Sign all ISAs.
#if __has_feature(ptrauth_objc_isa_strips) || __has_feature(ptrauth_objc_isa_signs) || __has_feature(ptrauth_objc_isa_authenticates)
# if __has_feature(ptrauth_objc_isa_authenticates)
# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_AUTH
# else
# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_STRIP
# endif
# if __has_feature(ptrauth_objc_isa_signs)
# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_ALL
# else
# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_NONE
# endif
#else
# if __has_feature(ptrauth_objc_isa)
# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_AUTH
# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_ALL
# else
# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_STRIP
# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_NONE
# endif
#endif
// When set, an unsigned superclass pointer is treated as Nil: the class
// behaves as if its superclass were weakly linked and not loaded, and
// uses of the class resolve to Nil.
#define SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL 0
#if defined(__arm64__) && TARGET_OS_IOS && !TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST
#define CONFIG_USE_PREOPT_CACHES 1
#else
#define CONFIG_USE_PREOPT_CACHES 0
#endif
// When set to 1, small methods in the shared cache have a direct
// offset to a selector. When set to 0, small methods in the shared
// cache have the same format as other small methods, with an offset
// to a selref.
#define CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS 1
#endif

View File

@ -36,6 +36,7 @@ OPTION( DebugMissingPools, OBJC_DEBUG_MISSING_POOLS, "warn about a
OPTION( DebugPoolAllocation, OBJC_DEBUG_POOL_ALLOCATION, "halt when autorelease pools are popped out of order, and allow heap debuggers to track autorelease pools")
OPTION( DebugDuplicateClasses, OBJC_DEBUG_DUPLICATE_CLASSES, "halt when multiple classes with the same name are present")
OPTION( DebugDontCrash, OBJC_DEBUG_DONT_CRASH, "halt the process by exiting instead of crashing")
OPTION( DebugPoolDepth, OBJC_DEBUG_POOL_DEPTH, "log a fault when at least a set number of autorelease pages have been allocated")
OPTION( DisableVtables, OBJC_DISABLE_VTABLES, "disable vtable dispatch")
OPTION( DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION, "disable preoptimization courtesy of dyld shared cache")
@ -43,3 +44,7 @@ OPTION( DisableTaggedPointers, OBJC_DISABLE_TAGGED_POINTERS, "disable tagg
OPTION( DisableTaggedPointerObfuscation, OBJC_DISABLE_TAG_OBFUSCATION, "disable obfuscation of tagged pointers")
OPTION( DisableNonpointerIsa, OBJC_DISABLE_NONPOINTER_ISA, "disable non-pointer isa fields")
OPTION( DisableInitializeForkSafety, OBJC_DISABLE_INITIALIZE_FORK_SAFETY, "disable safety checks for +initialize after fork")
OPTION( DisableFaults, OBJC_DISABLE_FAULTS, "disable os faults")
OPTION( DisablePreoptCaches, OBJC_DISABLE_PREOPTIMIZED_CACHES, "disable preoptimized caches")
OPTION( DisableAutoreleaseCoalescing, OBJC_DISABLE_AUTORELEASE_COALESCING, "disable coalescing of autorelease pool pointers")
OPTION( DisableAutoreleaseCoalescingLRU, OBJC_DISABLE_AUTORELEASE_COALESCING_LRU, "disable coalescing of autorelease pool pointers using look back N strategy")

View File

@ -440,7 +440,7 @@ static int _objc_default_exception_matcher(Class catch_cls, id exception)
Class cls;
for (cls = exception->getIsa();
cls != nil;
cls = cls->superclass)
cls = cls->getSuperclass())
{
if (cls == catch_cls) return 1;
}

View File

@ -38,10 +38,14 @@ extern message_ref_t *_getObjc2MessageRefs(const header_info *hi, size_t *count)
extern Class*_getObjc2ClassRefs(const header_info *hi, size_t *count);
extern Class*_getObjc2SuperRefs(const header_info *hi, size_t *count);
extern classref_t const *_getObjc2ClassList(const header_info *hi, size_t *count);
extern classref_t const *_getObjc2NonlazyClassList(const header_info *hi, size_t *count);
extern category_t * const *_getObjc2CategoryList(const header_info *hi, size_t *count);
extern category_t * const *_getObjc2CategoryList2(const header_info *hi, size_t *count);
extern category_t * const *_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count);
// Use hi->nlclslist() instead
// extern classref_t const *_getObjc2NonlazyClassList(const header_info *hi, size_t *count);
// Use hi->catlist() instead
// extern category_t * const *_getObjc2CategoryList(const header_info *hi, size_t *count);
// Use hi->catlist2() instead
// extern category_t * const *_getObjc2CategoryList2(const header_info *hi, size_t *count);
// Use hi->nlcatlist() instead
// extern category_t * const *_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count);
extern protocol_t * const *_getObjc2ProtocolList(const header_info *hi, size_t *count);
extern protocol_t **_getObjc2ProtocolRefs(const header_info *hi, size_t *count);
@ -50,6 +54,10 @@ struct UnsignedInitializer {
private:
uintptr_t storage;
public:
UnsignedInitializer(uint32_t offset) {
storage = (uintptr_t)&_mh_dylib_header + offset;
}
void operator () () const {
using Initializer = void(*)();
Initializer init =
@ -62,8 +70,11 @@ public:
extern UnsignedInitializer *getLibobjcInitializers(const header_info *hi, size_t *count);
extern classref_t const *_getObjc2NonlazyClassList(const headerType *mhdr, size_t *count);
extern category_t * const *_getObjc2CategoryList(const headerType *mhdr, size_t *count);
extern category_t * const *_getObjc2CategoryList2(const headerType *mhdr, size_t *count);
extern category_t * const *_getObjc2NonlazyCategoryList(const headerType *mhdr, size_t *count);
extern UnsignedInitializer *getLibobjcInitializers(const headerType *mhdr, size_t *count);
extern uint32_t *getLibobjcInitializerOffsets(const headerType *hi, size_t *count);
static inline void
foreach_data_segment(const headerType *mhdr,
@ -83,11 +94,12 @@ foreach_data_segment(const headerType *mhdr,
seg = (const segmentType *)((char *)seg + seg->cmdsize);
}
// enumerate __DATA* segments
// enumerate __DATA* and __AUTH* segments
seg = (const segmentType *) (mhdr + 1);
for (unsigned long i = 0; i < mhdr->ncmds; i++) {
if (seg->cmd == SEGMENT_CMD &&
segnameStartsWith(seg->segname, "__DATA"))
(segnameStartsWith(seg->segname, "__DATA") ||
segnameStartsWith(seg->segname, "__AUTH")))
{
code(seg, slide);
}

View File

@ -68,6 +68,12 @@ GETSECT(_getObjc2ProtocolList, protocol_t * const, "__objc_protolist")
GETSECT(_getObjc2ProtocolRefs, protocol_t *, "__objc_protorefs");
GETSECT(getLibobjcInitializers, UnsignedInitializer, "__objc_init_func");
uint32_t *getLibobjcInitializerOffsets(const headerType *mhdr, size_t *outCount) {
unsigned long byteCount = 0;
uint32_t *offsets = (uint32_t *)getsectiondata(mhdr, "__TEXT", "__objc_init_offs", &byteCount);
if (outCount) *outCount = byteCount / sizeof(uint32_t);
return offsets;
}
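Together with the UnsignedInitializer constructor shown above, these offsets decode into callable initializers by rebasing each 32-bit value against libobjc's own mach header. A hedged sketch of a driver loop (runLibobjcInitializers is invented; mhdr is assumed to be libobjc's own header):

    static void runLibobjcInitializers(const headerType *mhdr) {
        size_t count;
        uint32_t *offsets = getLibobjcInitializerOffsets(mhdr, &count);
        for (size_t i = 0; i < count; i++) {
            // The constructor rebases the offset (and, under ptrauth,
            // signs the result); operator() then calls the initializer.
            UnsignedInitializer init(offsets[i]);
            init();
        }
    }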
objc_image_info *
_getObjcImageInfo(const headerType *mhdr, size_t *outBytes)

View File

@ -219,6 +219,10 @@ OBJC_EXPORT uintptr_t objc_debug_taggedpointer_mask
OBJC_EXPORT uintptr_t objc_debug_taggedpointer_obfuscator
OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
#if OBJC_SPLIT_TAGGED_POINTERS
OBJC_EXPORT uint8_t objc_debug_tag60_permutations[8];
#endif
// tag_slot = (obj >> slot_shift) & slot_mask
OBJC_EXPORT unsigned int objc_debug_taggedpointer_slot_shift
@ -266,6 +270,9 @@ OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_payload_lshift
OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_payload_rshift
OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0, 2.0);
OBJC_EXPORT uintptr_t objc_debug_constant_cfstring_tag_bits
OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0);
#endif
@ -289,6 +296,9 @@ OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset OBJC_AVA
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
#if __OBJC2__
OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0);
#endif
__END_DECLS
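These exported offsets and the new ptr_mask let debugger-side tools walk AutoreleasePoolPage chains without hardcoding the page layout. A sketch under that assumption (parentPage is invented; page is a page address already readable in this process):

    static uintptr_t parentPage(uintptr_t page) {
        uintptr_t parent = *(uintptr_t *)(page + objc_debug_autoreleasepoolpage_parent_offset);
        return parent & objc_debug_autoreleasepoolpage_ptr_mask; // drop ptrauth/extra bits
    }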

View File

@ -396,10 +396,10 @@ static bool classHasTrivialInitialize(Class cls)
{
if (cls->isRootClass() || cls->isRootMetaclass()) return true;
Class rootCls = cls->ISA()->ISA()->superclass;
Class rootCls = cls->ISA()->ISA()->getSuperclass();
IMP rootImp = lookUpImpOrNil(rootCls, @selector(initialize), rootCls->ISA());
IMP imp = lookUpImpOrNil(cls, @selector(initialize), cls->ISA());
IMP rootImp = lookUpImpOrNilTryCache(rootCls, @selector(initialize), rootCls->ISA());
IMP imp = lookUpImpOrNilTryCache(cls, @selector(initialize), cls->ISA());
return (imp == nil || imp == (IMP)&objc_noop_imp || imp == rootImp);
}
@ -500,7 +500,7 @@ void initializeNonMetaClass(Class cls)
// Make sure super is done initializing BEFORE beginning to initialize cls.
// See note about deadlock above.
supercls = cls->superclass;
supercls = cls->getSuperclass();
if (supercls && !supercls->isInitialized()) {
initializeNonMetaClass(supercls);
}

View File

@ -44,6 +44,11 @@
#include <mach-o/loader.h>
#include <dispatch/dispatch.h>
// Include NSObject.h only if we're ObjC. Module imports get unhappy
// otherwise.
#if __OBJC__
#include <objc/NSObject.h>
#endif
// Termination reasons in the OS_REASON_OBJC namespace.
#define OBJC_EXIT_REASON_UNSPECIFIED 1
@ -54,6 +59,18 @@
// The runtime's class structure will never grow beyond this.
#define OBJC_MAX_CLASS_SIZE (32*sizeof(void*))
// Private objc_setAssociatedObject policy modifier. When an object is
// destroyed, associated objects attached to that object that are marked with
// this will be released after all associated objects not so marked.
//
// In addition, such associations are not removed when calling
// objc_removeAssociatedObjects.
//
// NOTE: This should be used sparingly. Performance will be poor when a single
// object has more than a few (deliberately vague) associated objects marked
// with this flag. If you're not sure if you should use this, you should not use
// this!
#define _OBJC_ASSOCIATION_SYSTEM_OBJECT (1 << 16)
__BEGIN_DECLS
@ -160,8 +177,14 @@ OBJC_EXPORT
objc_imp_cache_entry *_Nullable
class_copyImpCache(Class _Nonnull cls, int * _Nullable outCount)
OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0);
OBJC_EXPORT
unsigned long
sel_hash(SEL _Nullable sel)
OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0);
#endif
// Plainly-implemented GC barriers. Rosetta used to use these.
OBJC_EXPORT id _Nullable
objc_assign_strongCast_generic(id _Nullable value, id _Nullable * _Nonnull dest)
@ -199,7 +222,7 @@ OBJC_EXPORT void
_objc_setClassLoader(BOOL (* _Nonnull newClassLoader)(const char * _Nonnull))
OBJC2_UNAVAILABLE;
#if !(TARGET_OS_OSX && !TARGET_OS_IOSMAC && __i386__)
#if !(TARGET_OS_OSX && !TARGET_OS_MACCATALYST && __i386__)
// Add a class copy fixup handler. The name is a misnomer, as
// multiple calls will install multiple handlers. Older versions
// of the Swift runtime call it by name, and it's only used by Swift
@ -240,6 +263,21 @@ objc_copyClassNamesForImageHeader(const struct mach_header * _Nonnull mh,
unsigned int * _Nullable outCount)
OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0);
/**
* Returns all the classes within a library.
*
* @param image The path of the library or framework you are inquiring about.
* @param outCount The number of classes returned.
*
* @return An array of Class objects.
*/
OBJC_EXPORT Class _Nonnull * _Nullable
objc_copyClassesForImage(const char * _Nonnull image,
unsigned int * _Nullable outCount)
OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 4.0);
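A hedged usage sketch for the new SPI (the path is illustrative; the returned buffer is assumed to follow the usual copy convention and be freed by the caller):

    static void dumpImageClasses(void) {
        unsigned int count = 0;
        Class *classes = objc_copyClassesForImage("/usr/lib/libobjc.A.dylib", &count);
        for (unsigned int i = 0; classes && i < count; i++)
            printf("%s\n", class_getName(classes[i]));
        free(classes);
    }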
// Tagged pointer objects.
#if __LP64__
@ -287,6 +325,18 @@ enum
OBJC_TAG_UIColor = 17,
OBJC_TAG_CGColor = 18,
OBJC_TAG_NSIndexSet = 19,
OBJC_TAG_NSMethodSignature = 20,
OBJC_TAG_UTTypeRecord = 21,
// When using the split tagged pointer representation
// (OBJC_SPLIT_TAGGED_POINTERS), this is the first tag where
// the tag and payload are unobfuscated. All tags from here to
// OBJC_TAG_Last52BitPayload are unobfuscated. The shared cache
// builder is able to construct these as long as the low bit is
// not set (i.e. even-numbered tags).
OBJC_TAG_FirstUnobfuscatedSplitTag = 136, // 128 + 8, first ext tag with high bit set
OBJC_TAG_Constant_CFString = 136,
OBJC_TAG_First60BitPayload = 0,
OBJC_TAG_Last60BitPayload = 6,
@ -350,7 +400,16 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr);
// Don't use the values below. Use the declarations above.
#if (TARGET_OS_OSX || TARGET_OS_IOSMAC) && __x86_64__
#if __arm64__
// ARM64 uses a new tagged pointer scheme where normal tags are in
// the low bits, extended tags are in the high bits, and half of the
// extended tag space is reserved for unobfuscated payloads.
# define OBJC_SPLIT_TAGGED_POINTERS 1
#else
# define OBJC_SPLIT_TAGGED_POINTERS 0
#endif
#if (TARGET_OS_OSX || TARGET_OS_MACCATALYST) && __x86_64__
// 64-bit Mac - tag bit is LSB
# define OBJC_MSB_TAGGED_POINTERS 0
#else
@ -358,17 +417,37 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr);
# define OBJC_MSB_TAGGED_POINTERS 1
#endif
#define _OBJC_TAG_INDEX_MASK 0x7
#define _OBJC_TAG_INDEX_MASK 0x7UL
#if OBJC_SPLIT_TAGGED_POINTERS
#define _OBJC_TAG_SLOT_COUNT 8
#define _OBJC_TAG_SLOT_MASK 0x7UL
#else
// array slot includes the tag bit itself
#define _OBJC_TAG_SLOT_COUNT 16
#define _OBJC_TAG_SLOT_MASK 0xf
#define _OBJC_TAG_SLOT_MASK 0xfUL
#endif
#define _OBJC_TAG_EXT_INDEX_MASK 0xff
// array slot has no extra bits
#define _OBJC_TAG_EXT_SLOT_COUNT 256
#define _OBJC_TAG_EXT_SLOT_MASK 0xff
#if OBJC_MSB_TAGGED_POINTERS
#if OBJC_SPLIT_TAGGED_POINTERS
# define _OBJC_TAG_MASK (1UL<<63)
# define _OBJC_TAG_INDEX_SHIFT 0
# define _OBJC_TAG_SLOT_SHIFT 0
# define _OBJC_TAG_PAYLOAD_LSHIFT 1
# define _OBJC_TAG_PAYLOAD_RSHIFT 4
# define _OBJC_TAG_EXT_MASK (_OBJC_TAG_MASK | 0x7UL)
# define _OBJC_TAG_NO_OBFUSCATION_MASK ((1UL<<62) | _OBJC_TAG_EXT_MASK)
# define _OBJC_TAG_CONSTANT_POINTER_MASK \
~(_OBJC_TAG_EXT_MASK | ((uintptr_t)_OBJC_TAG_EXT_SLOT_MASK << _OBJC_TAG_EXT_SLOT_SHIFT))
# define _OBJC_TAG_EXT_INDEX_SHIFT 55
# define _OBJC_TAG_EXT_SLOT_SHIFT 55
# define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 9
# define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12
#elif OBJC_MSB_TAGGED_POINTERS
# define _OBJC_TAG_MASK (1UL<<63)
# define _OBJC_TAG_INDEX_SHIFT 60
# define _OBJC_TAG_SLOT_SHIFT 60
@ -392,18 +471,61 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr);
# define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12
#endif
// Map of tags to obfuscated tags.
extern uintptr_t objc_debug_taggedpointer_obfuscator;
#if OBJC_SPLIT_TAGGED_POINTERS
extern uint8_t objc_debug_tag60_permutations[8];
static inline uintptr_t _objc_basicTagToObfuscatedTag(uintptr_t tag) {
return objc_debug_tag60_permutations[tag];
}
static inline uintptr_t _objc_obfuscatedTagToBasicTag(uintptr_t tag) {
for (unsigned i = 0; i < 7; i++)
if (objc_debug_tag60_permutations[i] == tag)
return i;
return 7;
}
#endif
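Because objc_debug_tag60_permutations is a permutation of the eight basic tags and the reverse lookup falls through to 7 for the one value not found in slots 0..6, the two helpers invert each other. A standalone self-check sketch (the function name is invented):

    static inline void checkTagPermutationRoundTrip(void) {
        for (uintptr_t tag = 0; tag < 8; tag++) {
            uintptr_t obfuscated = _objc_basicTagToObfuscatedTag(tag);
            assert(_objc_obfuscatedTagToBasicTag(obfuscated) == tag);
        }
    }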
static inline void * _Nonnull
_objc_encodeTaggedPointer(uintptr_t ptr)
{
return (void *)(objc_debug_taggedpointer_obfuscator ^ ptr);
uintptr_t value = (objc_debug_taggedpointer_obfuscator ^ ptr);
#if OBJC_SPLIT_TAGGED_POINTERS
if ((value & _OBJC_TAG_NO_OBFUSCATION_MASK) == _OBJC_TAG_NO_OBFUSCATION_MASK)
return (void *)ptr;
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
uintptr_t permutedTag = _objc_basicTagToObfuscatedTag(basicTag);
value &= ~(_OBJC_TAG_INDEX_MASK << _OBJC_TAG_INDEX_SHIFT);
value |= permutedTag << _OBJC_TAG_INDEX_SHIFT;
#endif
return (void *)value;
}
static inline uintptr_t
_objc_decodeTaggedPointer_noPermute(const void * _Nullable ptr)
{
uintptr_t value = (uintptr_t)ptr;
#if OBJC_SPLIT_TAGGED_POINTERS
if ((value & _OBJC_TAG_NO_OBFUSCATION_MASK) == _OBJC_TAG_NO_OBFUSCATION_MASK)
return value;
#endif
return value ^ objc_debug_taggedpointer_obfuscator;
}
static inline uintptr_t
_objc_decodeTaggedPointer(const void * _Nullable ptr)
{
return (uintptr_t)ptr ^ objc_debug_taggedpointer_obfuscator;
uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr);
#if OBJC_SPLIT_TAGGED_POINTERS
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
value &= ~(_OBJC_TAG_INDEX_MASK << _OBJC_TAG_INDEX_SHIFT);
value |= _objc_obfuscatedTagToBasicTag(basicTag) << _OBJC_TAG_INDEX_SHIFT;
#endif
return value;
}
static inline bool
@ -445,6 +567,15 @@ _objc_isTaggedPointer(const void * _Nullable ptr)
return ((uintptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK;
}
static inline bool
_objc_isTaggedPointerOrNil(const void * _Nullable ptr)
{
// This function exists so that clang can turn the check into a
// comparison with NULL when appropriate; it turns out clang often
// cannot do that in many cases without this helper.
return !ptr || ((uintptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK;
}
static inline objc_tag_index_t
_objc_getTaggedPointerTag(const void * _Nullable ptr)
{
@ -463,7 +594,7 @@ static inline uintptr_t
_objc_getTaggedPointerValue(const void * _Nullable ptr)
{
// ASSERT(_objc_isTaggedPointer(ptr));
uintptr_t value = _objc_decodeTaggedPointer(ptr);
uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr);
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
if (basicTag == _OBJC_TAG_INDEX_MASK) {
return (value << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT;
@ -476,7 +607,7 @@ static inline intptr_t
_objc_getTaggedPointerSignedValue(const void * _Nullable ptr)
{
// ASSERT(_objc_isTaggedPointer(ptr));
uintptr_t value = _objc_decodeTaggedPointer(ptr);
uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr);
uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK;
if (basicTag == _OBJC_TAG_INDEX_MASK) {
return ((intptr_t)value << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT;
@ -485,6 +616,13 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr)
}
}
# if OBJC_SPLIT_TAGGED_POINTERS
static inline void * _Nullable
_objc_getTaggedPointerRawPointerValue(const void * _Nullable ptr) {
return (void *)((uintptr_t)ptr & _OBJC_TAG_CONSTANT_POINTER_MASK);
}
# endif
// OBJC_HAVE_TAGGED_POINTERS
#endif
@ -595,6 +733,11 @@ _class_getIvarMemoryManagement(Class _Nullable cls, Ivar _Nonnull ivar)
OBJC_EXPORT BOOL _class_isFutureClass(Class _Nullable cls)
OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0, 2.0);
/// Returns true if the class is an ABI stable Swift class. (Despite
/// the name, this does NOT return true for Swift classes built with
/// Swift versions prior to 5.0.)
OBJC_EXPORT BOOL _class_isSwift(Class _Nullable cls)
OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0);
// API to only be called by root classes like NSObject or NSProxy
@ -876,12 +1019,47 @@ typedef void (*_objc_func_willInitializeClass)(void * _Nullable context, Class _
OBJC_EXPORT void _objc_addWillInitializeClassFunc(_objc_func_willInitializeClass _Nonnull func, void * _Nullable context)
OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
// Replicate the conditionals in objc-config.h for packed isa, indexed isa, and preopt caches
#if __ARM_ARCH_7K__ >= 2 || (__arm64__ && !__LP64__) || \
!(!__LP64__ || TARGET_OS_WIN32 || \
(TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__))
OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout;
#define OBJC_WEAK_FORMATION_CALLOUT_DEFINED 1
#else
#define OBJC_WEAK_FORMATION_CALLOUT_DEFINED 0
#endif
#if defined(__arm64__) && TARGET_OS_IOS && !TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST
#define CONFIG_USE_PREOPT_CACHES 1
#else
#define CONFIG_USE_PREOPT_CACHES 0
#endif
#if __OBJC2__
// Helper function for objc4 tests only! Do not call this yourself
// for any reason ever.
OBJC_EXPORT void _method_setImplementationRawUnsafe(Method _Nonnull m, IMP _Nonnull imp)
OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0);
#endif
// API to only be called by classes that provide their own reference count storage
OBJC_EXPORT void
_objc_deallocOnMainThreadHelper(void * _Nullable context)
OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0, 2.0);
#if __OBJC__
// Declarations for internal methods used for custom weak reference
// implementations. These declarations ensure that the compiler knows
// to exclude these methods from NS_DIRECT_MEMBERS. Do NOT implement
// these methods unless you really know what you're doing.
@interface NSObject ()
- (BOOL)_tryRetain;
- (BOOL)_isDeallocating;
@end
#endif
// On async versus sync deallocation and the _dealloc2main flag
//
// Theory:
@ -944,7 +1122,7 @@ typedef enum {
} \
} \
-(NSUInteger)retainCount { \
return (_rc_ivar + 2) >> 1; \
return (NSUInteger)(_rc_ivar + 2) >> 1; \
} \
-(BOOL)_tryRetain { \
__typeof__(_rc_ivar) _prev; \
@ -966,12 +1144,12 @@ typedef enum {
} else if (_rc_ivar < -2) { \
__builtin_trap(); /* BUG: over-release elsewhere */ \
} \
return _rc_ivar & 1; \
return (_rc_ivar & 1) != 0; \
}
#define _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, _dealloc2main) \
_OBJC_SUPPORTED_INLINE_REFCNT_LOGIC_BLOCK(_rc_ivar, (^(id _self_ __attribute__((unused))) { \
if (_dealloc2main && !pthread_main_np()) { \
if ((_dealloc2main) && !pthread_main_np()) { \
return _OBJC_DEALLOC_OBJECT_LATER; \
} else { \
return _OBJC_DEALLOC_OBJECT_NOW; \
@ -981,6 +1159,25 @@ typedef enum {
#define _OBJC_SUPPORTED_INLINE_REFCNT(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 0)
#define _OBJC_SUPPORTED_INLINE_REFCNT_WITH_DEALLOC2MAIN(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 1)
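A hypothetical adopter, as a sketch only (the class name and ivar are invented; the macro family expands to -retain/-release/-retainCount/-_tryRetain/-_isDeallocating over the named ivar):

    @implementation MyInlineRCObject {
        intptr_t _rc_ivar; // inline refcount storage, managed solely by the macro
    }
    _OBJC_SUPPORTED_INLINE_REFCNT(_rc_ivar)
    @end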
// C cache_t wrappers for objcdt and the IMP caches test tool
struct cache_t;
struct bucket_t;
struct preopt_cache_t;
OBJC_EXPORT struct bucket_t * _Nonnull objc_cache_buckets(const struct cache_t * _Nonnull cache);
OBJC_EXPORT size_t objc_cache_bytesForCapacity(uint32_t cap);
OBJC_EXPORT uint32_t objc_cache_occupied(const struct cache_t * _Nonnull cache);
OBJC_EXPORT unsigned objc_cache_capacity(const struct cache_t * _Nonnull cache);
#if CONFIG_USE_PREOPT_CACHES
OBJC_EXPORT bool objc_cache_isConstantOptimizedCache(const struct cache_t * _Nonnull cache, bool strict, uintptr_t empty_addr);
OBJC_EXPORT unsigned objc_cache_preoptCapacity(const struct cache_t * _Nonnull cache);
OBJC_EXPORT Class _Nonnull objc_cache_preoptFallbackClass(const struct cache_t * _Nonnull cache);
OBJC_EXPORT const struct preopt_cache_t * _Nonnull objc_cache_preoptCache(const struct cache_t * _Nonnull cache);
#endif
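A sketch of how objcdt-style tooling might consume these wrappers (dumpCacheStats is invented; obtaining the cache_t pointer from an inspected class is assumed to happen elsewhere):

    static void dumpCacheStats(const struct cache_t *cache) {
        uint32_t used = objc_cache_occupied(cache);
        unsigned cap  = objc_cache_capacity(cache);
        printf("cache %p: %u/%u buckets occupied, %zu bytes\n",
               (const void *)cache, used, cap, objc_cache_bytesForCapacity(cap));
    }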
__END_DECLS
#endif

View File

@ -24,11 +24,13 @@
#if LOCKDEBUG
extern void lockdebug_assert_all_locks_locked();
extern void lockdebug_assert_no_locks_locked();
extern void lockdebug_assert_no_locks_locked_except(std::initializer_list<void *> canBeLocked);
extern void lockdebug_setInForkPrepare(bool);
extern void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock);
#else
static constexpr inline void lockdebug_assert_all_locks_locked() { }
static constexpr inline void lockdebug_assert_no_locks_locked() { }
static constexpr inline void lockdebug_assert_no_locks_locked_except(std::initializer_list<void *> canBeLocked) { };
static constexpr inline void lockdebug_setInForkPrepare(bool) { }
static constexpr inline void lockdebug_lock_precedes_lock(const void *, const void *) { }
#endif
@ -40,12 +42,12 @@ extern void lockdebug_mutex_unlock(mutex_tt<true> *lock);
extern void lockdebug_mutex_assert_locked(mutex_tt<true> *lock);
extern void lockdebug_mutex_assert_unlocked(mutex_tt<true> *lock);
static constexpr inline void lockdebug_remember_mutex(mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_lock(mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_try_lock(mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_unlock(mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_assert_locked(mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_assert_unlocked(mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_remember_mutex(__unused mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_lock(__unused mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_try_lock(__unused mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_unlock(__unused mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_assert_locked(__unused mutex_tt<false> *lock) { }
static constexpr inline void lockdebug_mutex_assert_unlocked(__unused mutex_tt<false> *lock) { }
extern void lockdebug_remember_monitor(monitor_tt<true> *lock);
@ -55,12 +57,12 @@ extern void lockdebug_monitor_wait(monitor_tt<true> *lock);
extern void lockdebug_monitor_assert_locked(monitor_tt<true> *lock);
extern void lockdebug_monitor_assert_unlocked(monitor_tt<true> *lock);
static constexpr inline void lockdebug_remember_monitor(monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_enter(monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_leave(monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_wait(monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_assert_locked(monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_assert_unlocked(monitor_tt<false> *lock) {}
static constexpr inline void lockdebug_remember_monitor(__unused monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_enter(__unused monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_leave(__unused monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_wait(__unused monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_assert_locked(__unused monitor_tt<false> *lock) { }
static constexpr inline void lockdebug_monitor_assert_unlocked(__unused monitor_tt<false> *lock) {}
extern void
@ -75,12 +77,12 @@ extern void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock);
static constexpr inline void
lockdebug_remember_recursive_mutex(recursive_mutex_tt<false> *lock) { }
lockdebug_remember_recursive_mutex(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
lockdebug_recursive_mutex_lock(recursive_mutex_tt<false> *lock) { }
lockdebug_recursive_mutex_lock(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
lockdebug_recursive_mutex_unlock(recursive_mutex_tt<false> *lock) { }
lockdebug_recursive_mutex_unlock(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<false> *lock) { }
lockdebug_recursive_mutex_assert_locked(__unused recursive_mutex_tt<false> *lock) { }
static constexpr inline void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<false> *lock) { }
lockdebug_recursive_mutex_assert_unlocked(__unused recursive_mutex_tt<false> *lock) { }

View File

@ -321,10 +321,18 @@ lockdebug_assert_all_locks_locked()
void
lockdebug_assert_no_locks_locked()
{
lockdebug_assert_no_locks_locked_except({});
}
void lockdebug_assert_no_locks_locked_except(std::initializer_list<void *> canBeLocked)
{
auto& owned = ownedLocks();
for (const auto& l : AllLocks()) {
if (std::find(canBeLocked.begin(), canBeLocked.end(), l.first) != canBeLocked.end())
continue;
if (hasLock(owned, l.first, l.second.k)) {
_objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k);
}
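A hypothetical call site for the new variant (the lock names are invented): during fork preparation, assert that nothing is held except the locks deliberately kept across fork.

    lockdebug_assert_no_locks_locked_except({ &runtimeLock, &sideTableLock });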

View File

@ -103,6 +103,12 @@ objc_object::isTaggedPointer()
return _objc_isTaggedPointer(this);
}
inline bool
objc_object::isTaggedPointerOrNil()
{
return _objc_isTaggedPointerOrNil(this);
}
inline bool
objc_object::isBasicTaggedPointer()
{
@ -121,7 +127,6 @@ objc_object::isExtTaggedPointer()
#else
// not SUPPORT_TAGGED_POINTERS
inline Class
objc_object::getIsa()
{
@ -141,6 +146,12 @@ objc_object::isTaggedPointer()
return false;
}
inline bool
objc_object::isTaggedPointerOrNil()
{
return !this;
}
inline bool
objc_object::isBasicTaggedPointer()
{
@ -160,19 +171,116 @@ objc_object::isExtTaggedPointer()
#if SUPPORT_NONPOINTER_ISA
// Set the class field in an isa. Takes both the class to set and
// a pointer to the object where the isa will ultimately be used.
// This is necessary to get the pointer signing right.
//
// Note: this method does not support setting an indexed isa. When
// indexed isas are in use, it can only be used to set the class of a
// raw isa.
inline void
isa_t::setClass(Class newCls, UNUSED_WITHOUT_PTRAUTH objc_object *obj)
{
// Match the conditional in isa.h.
#if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR
# if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_NONE
// No signing, just use the raw pointer.
uintptr_t signedCls = (uintptr_t)newCls;
# elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ONLY_SWIFT
// We're only signing Swift classes. Non-Swift classes just use
// the raw pointer
uintptr_t signedCls = (uintptr_t)newCls;
if (newCls->isSwiftStable())
signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));
# elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL
// We're signing everything
uintptr_t signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR));
# else
# error Unknown isa signing mode.
# endif
shiftcls_and_sig = signedCls >> 3;
#elif SUPPORT_INDEXED_ISA
// Indexed isa only uses this method to set a raw pointer class.
// Setting an indexed class is handled separately.
cls = newCls;
#else // Nonpointer isa, no ptrauth
shiftcls = (uintptr_t)newCls >> 3;
#endif
}
// Get the class pointer out of an isa. When ptrauth is supported,
// this operation is optionally authenticated. Many code paths don't
// need the authentication, so it can be skipped in those cases for
// better performance.
//
// Note: this method does not support retrieving indexed isas. When
// indexed isas are in use, it can only be used to retrieve the class
// of a raw isa.
#if SUPPORT_INDEXED_ISA || (ISA_SIGNING_AUTH_MODE != ISA_SIGNING_AUTH)
#define MAYBE_UNUSED_AUTHENTICATED_PARAM __attribute__((unused))
#else
#define MAYBE_UNUSED_AUTHENTICATED_PARAM UNUSED_WITHOUT_PTRAUTH
#endif
inline Class
objc_object::ISA()
isa_t::getClass(MAYBE_UNUSED_AUTHENTICATED_PARAM bool authenticated) {
#if SUPPORT_INDEXED_ISA
return cls;
#else
uintptr_t clsbits = bits;
# if __has_feature(ptrauth_calls)
# if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
// Most callers aren't security critical, so skip the
// authentication unless they ask for it. Message sending and
// cache filling are protected by the auth code in msgSend.
if (authenticated) {
// Mask off all bits besides the class pointer and signature.
clsbits &= ISA_MASK;
if (clsbits == 0)
return Nil;
clsbits = (uintptr_t)ptrauth_auth_data((void *)clsbits, ISA_SIGNING_KEY, ptrauth_blend_discriminator(this, ISA_SIGNING_DISCRIMINATOR));
} else {
// If not authenticating, strip using the precomputed class mask.
clsbits &= objc_debug_isa_class_mask;
}
# else
// If not authenticating, strip using the precomputed class mask.
clsbits &= objc_debug_isa_class_mask;
# endif
# else
clsbits &= ISA_MASK;
# endif
return (Class)clsbits;
#endif
}
inline Class
isa_t::getDecodedClass(bool authenticated) {
#if SUPPORT_INDEXED_ISA
if (nonpointer) {
return classForIndex(indexcls);
}
return (Class)cls;
#else
return getClass(authenticated);
#endif
}
inline Class
objc_object::ISA(bool authenticated)
{
ASSERT(!isTaggedPointer());
#if SUPPORT_INDEXED_ISA
if (isa.nonpointer) {
uintptr_t slot = isa.indexcls;
return classForIndex((unsigned)slot);
}
return (Class)isa.bits;
#else
return (Class)(isa.bits & ISA_MASK);
#endif
return isa.getDecodedClass(authenticated);
}
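The upshot for callers, sketched below (inspectClass is invented; the declaration is assumed to default authenticated to false): metadata inspection can take the cheap stripped read, while paths that will trust the pointer ask for authentication.

    static Class inspectClass(objc_object *obj) {
        Class cheap  = obj->ISA();     // strip with the class mask, no auth check
        Class vetted = obj->ISA(true); // authenticate the isa's ptrauth signature
        (void)cheap;
        return vetted;
    }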
inline Class
@ -220,18 +328,25 @@ objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
initIsa(cls, true, hasCxxDtor);
}
#if !SUPPORT_INDEXED_ISA && !ISA_HAS_CXX_DTOR_BIT
#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT __attribute__((unused))
#else
#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT
#endif
inline void
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
objc_object::initIsa(Class cls, bool nonpointer, UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT bool hasCxxDtor)
{
ASSERT(!isTaggedPointer());
isa_t newisa(0);
if (!nonpointer) {
isa = isa_t((uintptr_t)cls);
newisa.setClass(cls, this);
} else {
ASSERT(!DisableNonpointerIsa);
ASSERT(!cls->instancesRequireRawIsa());
isa_t newisa(0);
#if SUPPORT_INDEXED_ISA
ASSERT(cls->classArrayIndex() > 0);
@ -244,9 +359,13 @@ objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
newisa.bits = ISA_MAGIC_VALUE;
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
# if ISA_HAS_CXX_DTOR_BIT
newisa.has_cxx_dtor = hasCxxDtor;
newisa.shiftcls = (uintptr_t)cls >> 3;
# endif
newisa.setClass(cls, this);
#endif
newisa.extra_rc = 1;
}
// This write must be performed in a single store in some cases
// (for example when realizing a class because other threads
@ -256,7 +375,6 @@ objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
// ...but not too atomic because we don't want to hurt instantiation
isa = newisa;
}
}
inline Class
@ -270,34 +388,46 @@ objc_object::changeIsa(Class newCls)
ASSERT(!isTaggedPointer());
isa_t oldisa;
isa_t newisa;
isa_t newisa(0);
bool sideTableLocked = false;
bool transcribeToSideTable = false;
oldisa = LoadExclusive(&isa.bits);
do {
transcribeToSideTable = false;
oldisa = LoadExclusive(&isa.bits);
if ((oldisa.bits == 0 || oldisa.nonpointer) &&
!newCls->isFuture() && newCls->canAllocNonpointer())
{
// 0 -> nonpointer
// nonpointer -> nonpointer
#if SUPPORT_INDEXED_ISA
if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE;
else newisa = oldisa;
if (oldisa.bits == 0) {
newisa.bits = ISA_INDEX_MAGIC_VALUE;
newisa.extra_rc = 1;
} else {
newisa = oldisa;
}
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
newisa.has_cxx_dtor = newCls->hasCxxDtor();
ASSERT(newCls->classArrayIndex() > 0);
newisa.indexcls = (uintptr_t)newCls->classArrayIndex();
#else
if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE;
else newisa = oldisa;
if (oldisa.bits == 0) {
newisa.bits = ISA_MAGIC_VALUE;
newisa.extra_rc = 1;
}
else {
newisa = oldisa;
}
// isa.magic is part of ISA_MAGIC_VALUE
// isa.nonpointer is part of ISA_MAGIC_VALUE
# if ISA_HAS_CXX_DTOR_BIT
newisa.has_cxx_dtor = newCls->hasCxxDtor();
newisa.shiftcls = (uintptr_t)newCls >> 3;
# endif
newisa.setClass(newCls, this);
#endif
}
else if (oldisa.nonpointer) {
@ -308,37 +438,27 @@ objc_object::changeIsa(Class newCls)
if (!sideTableLocked) sidetable_lock();
sideTableLocked = true;
transcribeToSideTable = true;
newisa.cls = newCls;
newisa.setClass(newCls, this);
}
else {
// raw pointer -> raw pointer
newisa.cls = newCls;
newisa.setClass(newCls, this);
}
} while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
} while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
if (transcribeToSideTable) {
// Copy oldisa's retain count et al to side table.
// oldisa.has_assoc: nothing to do
// oldisa.has_cxx_dtor: nothing to do
sidetable_moveExtraRC_nolock(oldisa.extra_rc,
oldisa.deallocating,
oldisa.isDeallocating(),
oldisa.weakly_referenced);
}
if (sideTableLocked) sidetable_unlock();
if (oldisa.nonpointer) {
#if SUPPORT_INDEXED_ISA
return classForIndex(oldisa.indexcls);
#else
return (Class)((uintptr_t)oldisa.shiftcls << 3);
#endif
return oldisa.getDecodedClass(false);
}
else {
return oldisa.cls;
}
}
inline bool
objc_object::hasAssociatedObjects()
@ -354,15 +474,22 @@ objc_object::setHasAssociatedObjects()
{
if (isTaggedPointer()) return;
retry:
isa_t oldisa = LoadExclusive(&isa.bits);
isa_t newisa = oldisa;
if (slowpath(!hasNonpointerIsa() && ISA()->hasCustomRR()) && !ISA()->isFuture() && !ISA()->isMetaClass()) {
void(*setAssoc)(id, SEL) = (void(*)(id, SEL)) object_getMethodImplementation((id)this, @selector(_noteAssociatedObjects));
if ((IMP)setAssoc != _objc_msgForward) {
(*setAssoc)((id)this, @selector(_noteAssociatedObjects));
}
}
isa_t newisa, oldisa = LoadExclusive(&isa.bits);
do {
newisa = oldisa;
if (!newisa.nonpointer || newisa.has_assoc) {
ClearExclusive(&isa.bits);
return;
}
newisa.has_assoc = true;
if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
} while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
}
@ -378,9 +505,9 @@ objc_object::isWeaklyReferenced()
inline void
objc_object::setWeaklyReferenced_nolock()
{
retry:
isa_t oldisa = LoadExclusive(&isa.bits);
isa_t newisa = oldisa;
isa_t newisa, oldisa = LoadExclusive(&isa.bits);
do {
newisa = oldisa;
if (slowpath(!newisa.nonpointer)) {
ClearExclusive(&isa.bits);
sidetable_setWeaklyReferenced_nolock();
@ -391,7 +518,7 @@ objc_object::setWeaklyReferenced_nolock()
return;
}
newisa.weakly_referenced = true;
if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
} while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
}
@ -399,8 +526,12 @@ inline bool
objc_object::hasCxxDtor()
{
ASSERT(!isTaggedPointer());
if (isa.nonpointer) return isa.has_cxx_dtor;
else return isa.cls->hasCxxDtor();
#if ISA_HAS_CXX_DTOR_BIT
if (isa.nonpointer)
return isa.has_cxx_dtor;
else
#endif
return ISA()->hasCxxDtor();
}
@ -409,7 +540,7 @@ inline bool
objc_object::rootIsDeallocating()
{
if (isTaggedPointer()) return false;
if (isa.nonpointer) return isa.deallocating;
if (isa.nonpointer) return isa.isDeallocating();
return sidetable_isDeallocating();
}
@ -438,7 +569,11 @@ objc_object::rootDealloc()
if (fastpath(isa.nonpointer &&
!isa.weakly_referenced &&
!isa.has_assoc &&
#if ISA_HAS_CXX_DTOR_BIT
!isa.has_cxx_dtor &&
#else
!isa.getClass(false)->hasCxxDtor() &&
#endif
!isa.has_sidetable_rc))
{
assert(!sidetable_present());
@ -449,6 +584,8 @@ objc_object::rootDealloc()
}
}
extern explicit_atomic<id(*)(id)> swiftRetain;
extern explicit_atomic<void(*)(id)> swiftRelease;
// Equivalent to calling [this retain], with shortcuts if there is no override
inline id
@ -456,14 +593,9 @@ objc_object::retain()
{
ASSERT(!isTaggedPointer());
if (fastpath(!ISA()->hasCustomRR())) {
return rootRetain();
return rootRetain(false, RRVariant::FastOrMsgSend);
}
return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}
// Base retain implementation, ignoring overrides.
// This does not check isa.fast_rr; if there is an RR override then
// it was already called and it chose to call [super retain].
@ -476,19 +608,19 @@ objc_object::retain()
ALWAYS_INLINE id
objc_object::rootRetain()
{
return rootRetain(false, false);
return rootRetain(false, RRVariant::Fast);
}
ALWAYS_INLINE bool
objc_object::rootTryRetain()
{
return rootRetain(true, false) ? true : false;
return rootRetain(true, RRVariant::Fast) ? true : false;
}
ALWAYS_INLINE id
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
objc_object::rootRetain(bool tryRetain, objc_object::RRVariant variant)
{
if (isTaggedPointer()) return (id)this;
if (slowpath(isTaggedPointer())) return (id)this;
bool sideTableLocked = false;
bool transcribeToSideTable = false;
@ -496,29 +628,56 @@ objc_object::rootRetain(bool tryRetain, bool handleOverflow)
isa_t oldisa;
isa_t newisa;
oldisa = LoadExclusive(&isa.bits);
if (variant == RRVariant::FastOrMsgSend) {
// These checks are only meaningful for objc_retain()
// They are here so that we avoid a re-load of the isa.
if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
ClearExclusive(&isa.bits);
if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
return swiftRetain.load(memory_order_relaxed)((id)this);
}
return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain));
}
}
if (slowpath(!oldisa.nonpointer)) {
// a Class is a Class forever, so we can perform this check once
// outside of the CAS loop
if (oldisa.getDecodedClass(false)->isMetaClass()) {
ClearExclusive(&isa.bits);
return (id)this;
}
}
do {
transcribeToSideTable = false;
oldisa = LoadExclusive(&isa.bits);
newisa = oldisa;
if (slowpath(!newisa.nonpointer)) {
ClearExclusive(&isa.bits);
if (rawISA()->isMetaClass()) return (id)this;
if (!tryRetain && sideTableLocked) sidetable_unlock();
if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
else return sidetable_retain();
else return sidetable_retain(sideTableLocked);
}
// don't check newisa.fast_rr; we already called any RR overrides
if (slowpath(tryRetain && newisa.deallocating)) {
if (slowpath(newisa.isDeallocating())) {
ClearExclusive(&isa.bits);
if (!tryRetain && sideTableLocked) sidetable_unlock();
if (sideTableLocked) {
ASSERT(variant == RRVariant::Full);
sidetable_unlock();
}
if (slowpath(tryRetain)) {
return nil;
} else {
return (id)this;
}
}
uintptr_t carry;
newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++
if (slowpath(carry)) {
// newisa.extra_rc++ overflowed
if (!handleOverflow) {
if (variant != RRVariant::Full) {
ClearExclusive(&isa.bits);
return rootRetain_overflow(tryRetain);
}
@ -530,14 +689,20 @@ objc_object::rootRetain(bool tryRetain, bool handleOverflow)
newisa.extra_rc = RC_HALF;
newisa.has_sidetable_rc = true;
}
} while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));
} while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
if (variant == RRVariant::Full) {
if (slowpath(transcribeToSideTable)) {
// Copy the other half of the retain counts to the side table.
sidetable_addExtraRC_nolock(RC_HALF);
}
if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
} else {
ASSERT(!transcribeToSideTable);
ASSERT(!sideTableLocked);
}
return (id)this;
}
@ -548,12 +713,7 @@ objc_object::release()
{
ASSERT(!isTaggedPointer());
if (fastpath(!ISA()->hasCustomRR())) {
rootRelease();
return;
}
((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
rootRelease(true, RRVariant::FastOrMsgSend);
}
@ -570,35 +730,65 @@ objc_object::release()
ALWAYS_INLINE bool
objc_object::rootRelease()
{
return rootRelease(true, false);
return rootRelease(true, RRVariant::Fast);
}
ALWAYS_INLINE bool
objc_object::rootReleaseShouldDealloc()
{
return rootRelease(false, false);
return rootRelease(false, RRVariant::Fast);
}
ALWAYS_INLINE bool
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
objc_object::rootRelease(bool performDealloc, objc_object::RRVariant variant)
{
if (isTaggedPointer()) return false;
if (slowpath(isTaggedPointer())) return false;
bool sideTableLocked = false;
isa_t oldisa;
isa_t newisa;
isa_t newisa, oldisa;
oldisa = LoadExclusive(&isa.bits);
if (variant == RRVariant::FastOrMsgSend) {
// These checks are only meaningful for objc_release()
// They are here so that we avoid a re-load of the isa.
if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) {
ClearExclusive(&isa.bits);
if (oldisa.getDecodedClass(false)->canCallSwiftRR()) {
swiftRelease.load(memory_order_relaxed)((id)this);
return true;
}
((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release));
return true;
}
}
if (slowpath(!oldisa.nonpointer)) {
// a Class is a Class forever, so we can perform this check once
// outside of the CAS loop
if (oldisa.getDecodedClass(false)->isMetaClass()) {
ClearExclusive(&isa.bits);
return false;
}
}
retry:
do {
oldisa = LoadExclusive(&isa.bits);
newisa = oldisa;
if (slowpath(!newisa.nonpointer)) {
ClearExclusive(&isa.bits);
if (rawISA()->isMetaClass()) return false;
if (sideTableLocked) sidetable_unlock();
return sidetable_release(performDealloc);
return sidetable_release(sideTableLocked, performDealloc);
}
if (slowpath(newisa.isDeallocating())) {
ClearExclusive(&isa.bits);
if (sideTableLocked) {
ASSERT(variant == RRVariant::Full);
sidetable_unlock();
}
return false;
}
// don't check newisa.fast_rr; we already called any RR overrides
uintptr_t carry;
newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc--
@ -606,10 +796,16 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
// don't ClearExclusive()
goto underflow;
}
} while (slowpath(!StoreReleaseExclusive(&isa.bits,
oldisa.bits, newisa.bits)));
} while (slowpath(!StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
if (slowpath(newisa.isDeallocating()))
goto deallocate;
if (variant == RRVariant::Full) {
if (slowpath(sideTableLocked)) sidetable_unlock();
} else {
ASSERT(!sideTableLocked);
}
return false;
underflow:
@ -619,7 +815,7 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
newisa = oldisa;
if (slowpath(newisa.has_sidetable_rc)) {
if (!handleUnderflow) {
if (variant != RRVariant::Full) {
ClearExclusive(&isa.bits);
return rootRelease_underflow(performDealloc);
}
@ -632,35 +828,37 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
sideTableLocked = true;
// Need to start over to avoid a race against
// the nonpointer -> raw pointer transition.
oldisa = LoadExclusive(&isa.bits);
goto retry;
}
// Try to remove some retain counts from the side table.
size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);
auto borrow = sidetable_subExtraRC_nolock(RC_HALF);
// To avoid races, has_sidetable_rc must remain set
// even if the side table count is now zero.
bool emptySideTable = borrow.remaining == 0; // we'll clear the side table if no refcounts remain there
if (borrowed > 0) {
if (borrow.borrowed > 0) {
// Side table retain count decreased.
// Try to add them to the inline count.
newisa.extra_rc = borrowed - 1; // redo the original decrement too
bool stored = StoreReleaseExclusive(&isa.bits,
oldisa.bits, newisa.bits);
if (!stored) {
bool didTransitionToDeallocating = false;
newisa.extra_rc = borrow.borrowed - 1; // redo the original decrement too
newisa.has_sidetable_rc = !emptySideTable;
bool stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);
if (!stored && oldisa.nonpointer) {
// Inline update failed.
// Try it again right now. This prevents livelock on LL/SC
// architectures where the side table access itself may have
// dropped the reservation.
isa_t oldisa2 = LoadExclusive(&isa.bits);
isa_t newisa2 = oldisa2;
if (newisa2.nonpointer) {
uintptr_t overflow;
newisa2.bits =
addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
newisa.bits =
addc(oldisa.bits, RC_ONE * (borrow.borrowed-1), 0, &overflow);
newisa.has_sidetable_rc = !emptySideTable;
if (!overflow) {
stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
newisa2.bits);
stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits);
if (stored) {
didTransitionToDeallocating = newisa.isDeallocating();
}
}
}
@ -668,32 +866,31 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
if (!stored) {
// Inline update failed.
// Put the retains back in the side table.
sidetable_addExtraRC_nolock(borrowed);
ClearExclusive(&isa.bits);
sidetable_addExtraRC_nolock(borrow.borrowed);
oldisa = LoadExclusive(&isa.bits);
goto retry;
}
// Decrement successful after borrowing from side table.
// This decrement cannot be the deallocating decrement - the side
// table lock and has_sidetable_rc bit ensure that if everyone
// else tried to -release while we worked, the last one would block.
sidetable_unlock();
if (emptySideTable)
sidetable_clearExtraRC_nolock();
if (!didTransitionToDeallocating) {
if (slowpath(sideTableLocked)) sidetable_unlock();
return false;
}
}
else {
// Side table is empty after all. Fall-through to the dealloc path.
}
}
deallocate:
// Really deallocate.
if (slowpath(newisa.deallocating)) {
ClearExclusive(&isa.bits);
if (sideTableLocked) sidetable_unlock();
return overrelease_error();
// does not actually return
}
newisa.deallocating = true;
if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
ASSERT(newisa.isDeallocating());
ASSERT(isa.isDeallocating());
if (slowpath(sideTableLocked)) sidetable_unlock();
@ -736,10 +933,9 @@ objc_object::rootRetainCount()
if (isTaggedPointer()) return (uintptr_t)this;
sidetable_lock();
isa_t bits = LoadExclusive(&isa.bits);
ClearExclusive(&isa.bits);
isa_t bits = __c11_atomic_load((_Atomic uintptr_t *)&isa.bits, __ATOMIC_RELAXED);
if (bits.nonpointer) {
uintptr_t rc = 1 + bits.extra_rc;
uintptr_t rc = bits.extra_rc;
if (bits.has_sidetable_rc) {
rc += sidetable_getExtraRC_nolock();
}
@ -756,12 +952,29 @@ objc_object::rootRetainCount()
#else
// not SUPPORT_NONPOINTER_ISA
inline void
isa_t::setClass(Class cls, objc_object *obj)
{
this->cls = cls;
}
inline Class
objc_object::ISA()
isa_t::getClass(bool authenticated __unused)
{
return cls;
}
inline Class
isa_t::getDecodedClass(bool authenticated)
{
return getClass(authenticated);
}
inline Class
objc_object::ISA(bool authenticated __unused)
{
ASSERT(!isTaggedPointer());
return isa.cls;
return isa.getClass(/*authenticated*/false);
}
inline Class
@ -781,7 +994,7 @@ inline void
objc_object::initIsa(Class cls)
{
ASSERT(!isTaggedPointer());
isa = (uintptr_t)cls;
isa.setClass(cls, this);
}
@ -823,17 +1036,16 @@ objc_object::changeIsa(Class cls)
ASSERT(!isTaggedPointer());
isa_t oldisa, newisa;
newisa.cls = cls;
do {
oldisa = LoadExclusive(&isa.bits);
} while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
isa_t newisa, oldisa;
newisa.setClass(cls, this);
oldisa.bits = __c11_atomic_exchange((_Atomic uintptr_t *)&isa.bits, newisa.bits, __ATOMIC_RELAXED);
if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) {
Class oldcls = oldisa.getDecodedClass(/*authenticated*/false);
if (oldcls && oldcls->instancesHaveAssociatedObjects()) {
cls->setInstancesHaveAssociatedObjects();
}
return oldisa.cls;
return oldcls;
}
@ -873,7 +1085,7 @@ inline bool
objc_object::hasCxxDtor()
{
ASSERT(!isTaggedPointer());
return isa.cls->hasCxxDtor();
return isa.getClass(/*authenticated*/false)->hasCxxDtor();
}
@ -949,14 +1161,14 @@ inline bool
objc_object::rootRelease()
{
if (isTaggedPointer()) return false;
return sidetable_release(true);
return sidetable_release();
}
inline bool
objc_object::rootReleaseShouldDealloc()
{
if (isTaggedPointer()) return false;
return sidetable_release(false);
return sidetable_release(/*locked*/false, /*performDealloc*/false);
}

View File

@ -27,13 +27,13 @@
*/
#include "objc-private.h"
#include "objc-os.h"
#include "objc-file.h"
#if !SUPPORT_PREOPT
// Preoptimization not supported on this platform.
struct objc_selopt_t;
bool isPreoptimized(void)
{
return false;
@ -64,16 +64,6 @@ bool header_info::hasPreoptimizedProtocols() const
return false;
}
objc_selopt_t *preoptimizedSelectors(void)
{
return nil;
}
bool sharedCacheSupportsProtocolRoots(void)
{
return false;
}
Protocol *getPreoptimizedProtocol(const char *name)
{
return nil;
@ -123,7 +113,6 @@ void preopt_init(void)
#include <objc-shared-cache.h>
using objc_opt::objc_stringhash_offset_t;
using objc_opt::objc_protocolopt_t;
using objc_opt::objc_protocolopt2_t;
using objc_opt::objc_clsopt_t;
using objc_opt::objc_headeropt_ro_t;
@ -141,6 +130,62 @@ static bool preoptimized;
extern const objc_opt_t _objc_opt_data; // in __TEXT, __objc_opt_ro
namespace objc_opt {
struct objc_headeropt_ro_t {
uint32_t count;
uint32_t entsize;
header_info headers[0]; // sorted by mhdr address
header_info& getOrEnd(uint32_t i) const {
ASSERT(i <= count);
return *(header_info *)((uint8_t *)&headers + (i * entsize));
}
header_info& get(uint32_t i) const {
ASSERT(i < count);
return *(header_info *)((uint8_t *)&headers + (i * entsize));
}
uint32_t index(const header_info* hi) const {
const header_info* begin = &get(0);
const header_info* end = &getOrEnd(count);
ASSERT(hi >= begin && hi < end);
return (uint32_t)(((uintptr_t)hi - (uintptr_t)begin) / entsize);
}
header_info *get(const headerType *mhdr)
{
int32_t start = 0;
int32_t end = count;
while (start <= end) {
int32_t i = (start+end)/2;
header_info &hi = get(i);
if (mhdr == hi.mhdr()) return &hi;
else if (mhdr < hi.mhdr()) end = i-1;
else start = i+1;
}
#if DEBUG
for (uint32_t i = 0; i < count; i++) {
header_info &hi = get(i);
if (mhdr == hi.mhdr()) {
_objc_fatal("failed to find header %p (%d/%d)",
mhdr, i, count);
}
}
#endif
return nil;
}
};
struct objc_headeropt_rw_t {
uint32_t count;
uint32_t entsize;
header_info_rw headers[0]; // sorted by mhdr address
};
};
/***********************************************************************
* Return YES if we have a valid optimized shared cache.
**********************************************************************/
@ -199,38 +244,114 @@ bool header_info::hasPreoptimizedProtocols() const
return info()->optimizedByDyld() || info()->optimizedByDyldClosure();
}
objc_selopt_t *preoptimizedSelectors(void)
bool header_info::hasPreoptimizedSectionLookups() const
{
return opt ? opt->selopt() : nil;
objc_opt::objc_headeropt_ro_t *hinfoRO = opt->headeropt_ro();
if (hinfoRO->entsize == (2 * sizeof(intptr_t)))
return NO;
return YES;
}
bool sharedCacheSupportsProtocolRoots(void)
const classref_t *header_info::nlclslist(size_t *outCount) const
{
return (opt != nil) && (opt->protocolopt2() != nil);
#if __OBJC2__
// This field is new, so temporarily be resilient to the shared cache
// not generating it
if (isPreoptimized() && hasPreoptimizedSectionLookups()) {
*outCount = nlclslist_count;
const classref_t *list = (const classref_t *)(((intptr_t)&nlclslist_offset) + nlclslist_offset);
#if DEBUG
size_t debugCount;
assert((list == _getObjc2NonlazyClassList(mhdr(), &debugCount)) && (*outCount == debugCount));
#endif
return list;
}
return _getObjc2NonlazyClassList(mhdr(), outCount);
#else
return NULL;
#endif
}
category_t * const *header_info::nlcatlist(size_t *outCount) const
{
#if __OBJC2__
// This field is new, so temporarily be resilient to the shared cache
// not generating it
if (isPreoptimized() && hasPreoptimizedSectionLookups()) {
*outCount = nlcatlist_count;
category_t * const *list = (category_t * const *)(((intptr_t)&nlcatlist_offset) + nlcatlist_offset);
#if DEBUG
size_t debugCount;
assert((list == _getObjc2NonlazyCategoryList(mhdr(), &debugCount)) && (*outCount == debugCount));
#endif
return list;
}
return _getObjc2NonlazyCategoryList(mhdr(), outCount);
#else
return NULL;
#endif
}
category_t * const *header_info::catlist(size_t *outCount) const
{
#if __OBJC2__
// This field is new, so temporarily be resilient to the shared cache
// not generating it
if (isPreoptimized() && hasPreoptimizedSectionLookups()) {
*outCount = catlist_count;
category_t * const *list = (category_t * const *)(((intptr_t)&catlist_offset) + catlist_offset);
#if DEBUG
size_t debugCount;
assert((list == _getObjc2CategoryList(mhdr(), &debugCount)) && (*outCount == debugCount));
#endif
return list;
}
return _getObjc2CategoryList(mhdr(), outCount);
#else
return NULL;
#endif
}
category_t * const *header_info::catlist2(size_t *outCount) const
{
#if __OBJC2__
// This field is new, so temporarily be resilient to the shared cache
// not generating it
if (isPreoptimized() && hasPreoptimizedSectionLookups()) {
*outCount = catlist2_count;
category_t * const *list = (category_t * const *)(((intptr_t)&catlist2_offset) + catlist2_offset);
#if DEBUG
size_t debugCount;
assert((list == _getObjc2CategoryList2(mhdr(), &debugCount)) && (*outCount == debugCount));
#endif
return list;
}
return _getObjc2CategoryList2(mhdr(), outCount);
#else
return NULL;
#endif
}
Protocol *getSharedCachePreoptimizedProtocol(const char *name)
{
// Look in the new table if we have it
if (objc_protocolopt2_t *protocols2 = opt ? opt->protocolopt2() : nil) {
// Note: we must pass the lambda directly here; otherwise it would be
// converted to a block and sent copy/autorelease messages.
return (Protocol *)protocols2->getProtocol(name, [](const void* hi) -> bool {
return ((header_info *)hi)->isLoaded();
});
}
objc_protocolopt_t *protocols = opt ? opt->protocolopt() : nil;
objc_protocolopt2_t *protocols = opt ? opt->protocolopt2() : nil;
if (!protocols) return nil;
return (Protocol *)protocols->getProtocol(name);
// Note: we must pass the lambda directly here; otherwise it would be
// converted to a block and sent copy/autorelease messages.
return (Protocol *)protocols->getProtocol(name, [](const void* hi) -> bool {
return ((header_info *)hi)->isLoaded();
});
}
Protocol *getPreoptimizedProtocol(const char *name)
{
objc_protocolopt2_t *protocols = opt ? opt->protocolopt2() : nil;
if (!protocols) return nil;
// Try table from dyld closure first. It was built to ignore the dupes it
// knows will come from the cache, so anything left in here was there when
// we launched
@ -354,47 +475,6 @@ Class* copyPreoptimizedClasses(const char *name, int *outCount)
return nil;
}
namespace objc_opt {
struct objc_headeropt_ro_t {
uint32_t count;
uint32_t entsize;
header_info headers[0]; // sorted by mhdr address
header_info *get(const headerType *mhdr)
{
ASSERT(entsize == sizeof(header_info));
int32_t start = 0;
int32_t end = count;
while (start <= end) {
int32_t i = (start+end)/2;
header_info *hi = headers+i;
if (mhdr == hi->mhdr()) return hi;
else if (mhdr < hi->mhdr()) end = i-1;
else start = i+1;
}
#if DEBUG
for (uint32_t i = 0; i < count; i++) {
header_info *hi = headers+i;
if (mhdr == hi->mhdr()) {
_objc_fatal("failed to find header %p (%d/%d)",
mhdr, i, count);
}
}
#endif
return nil;
}
};
struct objc_headeropt_rw_t {
uint32_t count;
uint32_t entsize;
header_info_rw headers[0]; // sorted by mhdr address
};
};
header_info *preoptimizedHinfoForHeader(const headerType *mhdr)
{
@ -422,7 +502,7 @@ header_info_rw *getPreoptimizedHeaderRW(const struct header_info *const hdr)
_objc_fatal("preoptimized header_info missing for %s (%p %p %p)",
hdr->fname(), hdr, hinfoRO, hinfoRW);
}
int32_t index = (int32_t)(hdr - hinfoRO->headers);
int32_t index = hinfoRO->index(hdr);
ASSERT(hinfoRW->entsize == sizeof(header_info_rw));
return &hinfoRW->headers[index];
}
@ -435,7 +515,7 @@ void preopt_init(void)
const uintptr_t start = (uintptr_t)_dyld_get_shared_cache_range(&length);
if (start) {
objc::dataSegmentsRanges.add(start, start + length);
objc::dataSegmentsRanges.setSharedCacheRange(start, start + length);
}
// `opt` not set at compile time in order to detect too-early usage

View File

@ -93,6 +93,16 @@ struct explicit_atomic : public std::atomic<T> {
}
};
namespace objc {
static inline uintptr_t mask16ShiftBits(uint16_t mask)
{
// returns by how much 0xffff must be shifted "right" to return mask
uintptr_t maskShift = __builtin_clz(mask) - 16;
ASSERT((0xffff >> maskShift) == mask);
return maskShift;
}
}
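A worked example of the arithmetic: __builtin_clz operates on the 32-bit promotion, so clz(0xffff) == 16 and clz(0x7fff) == 17, giving shifts of 0 and 1 respectively.

    assert(objc::mask16ShiftBits(0xffff) == 0); // 0xffff >> 0 == 0xffff
    assert(objc::mask16ShiftBits(0x7fff) == 1); // 0xffff >> 1 == 0x7fff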
#if TARGET_OS_MAC
# define OS_UNFAIR_LOCK_INLINE 1
@ -175,17 +185,25 @@ LoadExclusive(uintptr_t *src)
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
return !__builtin_arm_strex(value, dst);
if (slowpath(__builtin_arm_strex(value, dst))) {
*oldvalue = LoadExclusive(dst);
return false;
}
return true;
}
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
return !__builtin_arm_stlex(value, dst);
if (slowpath(__builtin_arm_stlex(value, dst))) {
*oldvalue = LoadExclusive(dst);
return false;
}
return true;
}
static ALWAYS_INLINE
@ -206,17 +224,17 @@ LoadExclusive(uintptr_t *src)
static ALWAYS_INLINE
bool
StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
static ALWAYS_INLINE
bool
StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value)
{
return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}
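With oldvalue now passed by pointer, both backends behave like compare-exchange: on failure the current value lands in *oldvalue, so retry loops no longer reload inside the body. The idiom, sketched with an invented word and flag:

    static void setFlagAtomically(uintptr_t *word) {
        const uintptr_t kFlagBit = 1; // illustrative bit
        uintptr_t oldv = LoadExclusive(word);
        uintptr_t newv;
        do {
            newv = oldv | kFlagBit; // recompute from the refreshed old value
        } while (slowpath(!StoreExclusive(word, &oldv, newv)));
    }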
static ALWAYS_INLINE
@ -726,7 +744,7 @@ class mutex_tt : nocopy_t {
lockdebug_remember_mutex(this);
}
constexpr mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }
constexpr mutex_tt(__unused const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }
void lock() {
lockdebug_mutex_lock(this);
@ -762,7 +780,7 @@ class mutex_tt : nocopy_t {
// Address-ordered lock discipline for a pair of locks.
static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
if (lock1 < lock2) {
if ((uintptr_t)lock1 < (uintptr_t)lock2) {
lock1->lock();
lock2->lock();
} else {
@ -812,7 +830,7 @@ class recursive_mutex_tt : nocopy_t {
lockdebug_remember_recursive_mutex(this);
}
constexpr recursive_mutex_tt(const fork_unsafe_lock_t unsafe)
constexpr recursive_mutex_tt(__unused const fork_unsafe_lock_t unsafe)
: mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT)
{ }
@ -877,7 +895,7 @@ class monitor_tt {
lockdebug_remember_monitor(this);
}
monitor_tt(const fork_unsafe_lock_t unsafe)
monitor_tt(__unused const fork_unsafe_lock_t unsafe)
: mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
{ }
@ -1019,63 +1037,20 @@ ustrdupMaybeNil(const uint8_t *str)
// OS version checking:
//
// sdkVersion()
// DYLD_OS_VERSION(mac, ios, tv, watch, bridge)
// sdkIsOlderThan(mac, ios, tv, watch, bridge)
// sdkIsAtLeast(mac, ios, tv, watch, bridge)
//
// This version order matches OBJC_AVAILABLE.
#if TARGET_OS_OSX
# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_MACOSX_VERSION_##x
# define sdkVersion() dyld_get_program_sdk_version()
#elif TARGET_OS_IOS
# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##i
# define sdkVersion() dyld_get_program_sdk_version()
#elif TARGET_OS_TV
// dyld does not currently have distinct constants for tvOS
# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
# define sdkVersion() dyld_get_program_sdk_version()
#elif TARGET_OS_BRIDGE
# if TARGET_OS_WATCH
# error bridgeOS 1.0 not supported
# endif
// fixme don't need bridgeOS versioning yet
# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t
# define sdkVersion() dyld_get_program_sdk_bridge_os_version()
#elif TARGET_OS_WATCH
# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_WATCHOS_VERSION_##w
// watchOS has its own API for compatibility reasons
# define sdkVersion() dyld_get_program_sdk_watch_os_version()
#else
# error unknown OS
#endif
#define sdkIsOlderThan(x, i, t, w, b) \
(sdkVersion() < DYLD_OS_VERSION(x, i, t, w, b))
//
// NOTE: prefer dyld_program_sdk_at_least when possible
#define sdkIsAtLeast(x, i, t, w, b) \
(sdkVersion() >= DYLD_OS_VERSION(x, i, t, w, b))
(dyld_program_sdk_at_least(dyld_platform_version_macOS_ ## x) || \
dyld_program_sdk_at_least(dyld_platform_version_iOS_ ## i) || \
dyld_program_sdk_at_least(dyld_platform_version_tvOS_ ## t) || \
dyld_program_sdk_at_least(dyld_platform_version_watchOS_ ## w) || \
dyld_program_sdk_at_least(dyld_platform_version_bridgeOS_ ## b))
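Call sites keep the five-slot shape; a hedged example with made-up version numbers (each slot pastes into a dyld_platform_version_* constant):
// True when the program was linked against at least the given SDK
// on whichever platform it is running.
if (sdkIsAtLeast(10_13, 11_0, 11_0, 4_0, 2_0)) {
    // safe to rely on post-10.13-era behavior here
}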
// Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan()
#define DYLD_MACOSX_VERSION_0 0
#define DYLD_IOS_VERSION_0 0
#define DYLD_TVOS_VERSION_0 0
#define DYLD_WATCHOS_VERSION_0 0
#define DYLD_BRIDGEOS_VERSION_0 0
// Pretty-print a DYLD_*_VERSION_* constant.
#define SDK_FORMAT "%hu.%hhu.%hhu"
#define FORMAT_SDK(v) \
(unsigned short)(((uint32_t)(v))>>16), \
(unsigned char)(((uint32_t)(v))>>8), \
(unsigned char)(((uint32_t)(v))>>0)
#ifndef __BUILDING_OBJCDT__
// fork() safety requires careful tracking of all locks.
// Our custom lock types check this in debug builds.
// Disallow direct use of all other lock types.
@ -1083,6 +1058,6 @@ typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE;
typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE;
typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE;
typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE;
#endif
#endif

View File

@ -28,7 +28,7 @@
#include "objc-private.h"
#include "objc-loadmethod.h"
#include "objc-cache.h"
#include "objc-bp-assist.h"
#if TARGET_OS_WIN32
@ -492,11 +492,16 @@ map_images_nolock(unsigned mhCount, const char * const mhPaths[],
if (mhdr->filetype == MH_EXECUTE) {
// Size some data structures based on main executable's size
#if __OBJC2__
// If dyld3 optimized the main executable, then there shouldn't
// be any selrefs needed in the dynamic map so we can just init
// to a 0 sized map
if ( !hi->hasPreoptimizedSelectors() ) {
size_t count;
_getObjc2SelectorRefs(hi, &count);
selrefCount += count;
_getObjc2MessageRefs(hi, &count);
selrefCount += count;
}
#else
_getObjcSelectorRefs(hi, &selrefCount);
#endif
@ -559,13 +564,12 @@ map_images_nolock(unsigned mhCount, const char * const mhPaths[],
// Disable +initialize fork safety if the app has a
// __DATA,__objc_fork_ok section.
if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_13) {
if (!dyld_program_sdk_at_least(dyld_platform_version_macOS_10_13)) {
DisableInitializeForkSafety = true;
if (PrintInitializing) {
_objc_inform("INITIALIZE: disabling +initialize fork "
"safety enforcement because the app is "
"too old (SDK version " SDK_FORMAT ")",
FORMAT_SDK(dyld_get_program_sdk_version()));
"too old.)");
}
}
@ -657,6 +661,11 @@ static void static_init()
for (size_t i = 0; i < count; i++) {
inits[i]();
}
auto offsets = getLibobjcInitializerOffsets(&_mh_dylib_header, &count);
for (size_t i = 0; i < count; i++) {
UnsignedInitializer init(offsets[i]);
init();
}
}
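getLibobjcInitializerOffsets() and UnsignedInitializer are defined elsewhere in this commit; roughly, each entry is a 32-bit offset of an initializer from libobjc's own mach header. A sketch of the idea, not the exact definition (ptrauth signing of the rebased function pointer is elided):
struct UnsignedInitializerSketch {
    uint32_t offset;  // distance from &_mh_dylib_header to the initializer
    explicit UnsignedInitializerSketch(uint32_t off) : offset(off) {}
    void operator()() const {
        uintptr_t base = (uintptr_t)&_mh_dylib_header;
        ((void (*)(void))(base + offset))();
    }
};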
@ -922,7 +931,9 @@ void _objc_init(void)
static_init();
runtime_init();
exception_init();
cache_init();
#if __OBJC2__
cache_t::init();
#endif
_imp_implementationWithBlock_init();
_dyld_objc_notify_register(&map_images, load_images, unmap_image);

View File

@ -53,11 +53,23 @@
#define ASSERT(x) assert(x)
#endif
// `this` is never NULL in C++ unless we encounter UB, but checking for what's impossible
// is the point of these asserts, so disable the corresponding warning, and let's hope
// we will reach the assert despite the UB
#define ASSERT_THIS_NOT_NULL \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wundefined-bool-conversion\"") \
ASSERT(this) \
_Pragma("clang diagnostic pop")
struct objc_class;
struct objc_object;
struct category_t;
typedef struct objc_class *Class;
typedef struct objc_object *id;
typedef struct classref *classref_t;
namespace {
struct SideTable;
@ -69,13 +81,32 @@ union isa_t {
isa_t() { }
isa_t(uintptr_t value) : bits(value) { }
Class cls;
uintptr_t bits;
private:
// Accessing the class requires custom ptrauth operations, so
// force clients to go through setClass/getClass by making this
// private.
Class cls;
public:
#if defined(ISA_BITFIELD)
struct {
ISA_BITFIELD; // defined in isa.h
};
bool isDeallocating() {
return extra_rc == 0 && has_sidetable_rc == 0;
}
void setDeallocating() {
extra_rc = 0;
has_sidetable_rc = 0;
}
#endif
void setClass(Class cls, objc_object *obj);
Class getClass(bool authenticated);
Class getDecodedClass(bool authenticated);
};
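The isDeallocating()/setDeallocating() pair above encodes the deallocating state without a dedicated bit; the reading implied here is that extra_rc now holds the full inline retain count (a live, freshly allocated object starts at extra_rc == 1), so:
// deallocating <=> no inline references and no side-table spillover
bool deallocating = (isa.extra_rc == 0 && isa.has_sidetable_rc == 0);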
@ -86,7 +117,7 @@ private:
public:
// ISA() assumes this is NOT a tagged pointer object
Class ISA();
Class ISA(bool authenticated = false);
// rawISA() assumes this is NOT a tagged pointer object or a non pointer ISA
Class rawISA();
@ -113,6 +144,7 @@ public:
bool hasNonpointerIsa();
bool isTaggedPointer();
bool isTaggedPointerOrNil();
bool isBasicTaggedPointer();
bool isExtTaggedPointer();
bool isClass();
@ -154,22 +186,36 @@ private:
uintptr_t overrelease_error();
#if SUPPORT_NONPOINTER_ISA
// Controls what parts of root{Retain,Release} to emit/inline
// - Full means the full (slow) implementation
// - Fast means the fastpaths only
// - FastOrMsgSend means the fastpaths but checking whether we should call
// -retain/-release or Swift, for the usage of objc_{retain,release}
enum class RRVariant {
Full,
Fast,
FastOrMsgSend,
};
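A hedged sketch of how an entry point picks a variant (mirrors the shape of objc_retain in this release, not quoted verbatim):
id objc_retain(id obj) {
    if (obj->isTaggedPointerOrNil()) return obj;
    // inline fast path; falls back to the full path, or to -retain
    // for classes with custom retain/release
    return obj->rootRetain(false, objc_object::RRVariant::FastOrMsgSend);
}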
// Unified retain count manipulation for nonpointer isa
id rootRetain(bool tryRetain, bool handleOverflow);
bool rootRelease(bool performDealloc, bool handleUnderflow);
inline id rootRetain(bool tryRetain, RRVariant variant);
inline bool rootRelease(bool performDealloc, RRVariant variant);
id rootRetain_overflow(bool tryRetain);
uintptr_t rootRelease_underflow(bool performDealloc);
void clearDeallocating_slow();
// Side table retain count overflow for nonpointer isa
struct SidetableBorrow { size_t borrowed, remaining; };
void sidetable_lock();
void sidetable_unlock();
void sidetable_moveExtraRC_nolock(size_t extra_rc, bool isDeallocating, bool weaklyReferenced);
bool sidetable_addExtraRC_nolock(size_t delta_rc);
size_t sidetable_subExtraRC_nolock(size_t delta_rc);
SidetableBorrow sidetable_subExtraRC_nolock(size_t delta_rc);
size_t sidetable_getExtraRC_nolock();
void sidetable_clearExtraRC_nolock();
#endif
// Side-table-only retain count
@ -179,10 +225,10 @@ private:
bool sidetable_isWeaklyReferenced();
void sidetable_setWeaklyReferenced_nolock();
id sidetable_retain();
id sidetable_retain(bool locked = false);
id sidetable_retain_slow(SideTable& table);
uintptr_t sidetable_release(bool performDealloc = true);
uintptr_t sidetable_release(bool locked = false, bool performDealloc = true);
uintptr_t sidetable_release_slow(SideTable& table, bool performDealloc = true);
bool sidetable_tryRetain();
@ -241,14 +287,6 @@ typedef struct old_property *objc_property_t;
#include "objc-loadmethod.h"
#if SUPPORT_PREOPT && __cplusplus
#include <objc-shared-cache.h>
using objc_selopt_t = const objc_opt::objc_selopt_t;
#else
struct objc_selopt_t;
#endif
#define STRINGIFY(x) #x
#define STRINGIFY2(x) STRINGIFY(x)
@ -284,16 +322,24 @@ private:
}
};
struct Range shared_cache;
struct Range *ranges;
uint32_t count;
uint32_t size : 31;
uint32_t sorted : 1;
public:
inline bool inSharedCache(uintptr_t ptr) const {
return shared_cache.contains(ptr);
}
inline bool contains(uint16_t witness, uintptr_t ptr) const {
return witness < count && ranges[witness].contains(ptr);
}
inline void setSharedCacheRange(uintptr_t start, uintptr_t end) {
shared_cache = Range{start, end};
add(start, end);
}
bool find(uintptr_t ptr, uint32_t &pos);
void add(uintptr_t start, uintptr_t end);
void remove(uintptr_t start, uintptr_t end);
@ -301,6 +347,10 @@ public:
extern struct SafeRanges dataSegmentsRanges;
static inline bool inSharedCache(uintptr_t ptr) {
return dataSegmentsRanges.inSharedCache(ptr);
}
} // objc
struct header_info;
@ -358,6 +408,22 @@ private:
// from this location.
intptr_t info_offset;
// Offset from this location to the non-lazy class list
intptr_t nlclslist_offset;
uintptr_t nlclslist_count;
// Offset from this location to the non-lazy category list
intptr_t nlcatlist_offset;
uintptr_t nlcatlist_count;
// Offset from this location to the category list
intptr_t catlist_offset;
uintptr_t catlist_count;
// Offset from this location to the category list 2
intptr_t catlist2_offset;
uintptr_t catlist2_count;
// Do not add fields without editing ObjCModernAbstraction.hpp
public:
@ -384,6 +450,30 @@ public:
info_offset = (intptr_t)info - (intptr_t)&info_offset;
}
const classref_t *nlclslist(size_t *outCount) const;
void set_nlclslist(const void *list) {
nlclslist_offset = (intptr_t)list - (intptr_t)&nlclslist_offset;
}
category_t * const *nlcatlist(size_t *outCount) const;
void set_nlcatlist(const void *list) {
nlcatlist_offset = (intptr_t)list - (intptr_t)&nlcatlist_offset;
}
category_t * const *catlist(size_t *outCount) const;
void set_catlist(const void *list) {
catlist_offset = (intptr_t)list - (intptr_t)&catlist_offset;
}
category_t * const *catlist2(size_t *outCount) const;
void set_catlist2(const void *list) {
catlist2_offset = (intptr_t)list - (intptr_t)&catlist2_offset;
}
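All four accessors reuse the self-relative-offset idiom of info_offset: storing target - &field instead of a raw pointer keeps header_info valid wherever it is mapped (slide-independent, shared-cache friendly). The pattern in isolation, as a sketch:
template <typename T>
struct SelfRelativePtr {
    intptr_t offset;
    void set(const T *p) { offset = (intptr_t)p - (intptr_t)&offset; }
    const T *get() const { return (const T *)((intptr_t)&offset + offset); }
};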
bool isLoaded() {
return getHeaderInfoRW()->getLoaded();
}
@ -424,6 +514,8 @@ public:
bool hasPreoptimizedProtocols() const;
bool hasPreoptimizedSectionLookups() const;
#if !__OBJC2__
struct old_protocol **proto_refs;
struct objc_module *mod_ptr;
@ -497,9 +589,6 @@ extern bool isPreoptimized(void);
extern bool noMissingWeakSuperclasses(void);
extern header_info *preoptimizedHinfoForHeader(const headerType *mhdr);
extern objc_selopt_t *preoptimizedSelectors(void);
extern bool sharedCacheSupportsProtocolRoots(void);
extern Protocol *getPreoptimizedProtocol(const char *name);
extern Protocol *getSharedCachePreoptimizedProtocol(const char *name);
@ -513,18 +602,22 @@ extern Class _calloc_class(size_t size);
enum {
LOOKUP_INITIALIZE = 1,
LOOKUP_RESOLVER = 2,
LOOKUP_CACHE = 4,
LOOKUP_NIL = 8,
LOOKUP_NIL = 4,
LOOKUP_NOCACHE = 8,
};
extern IMP lookUpImpOrForward(id obj, SEL, Class cls, int behavior);
static inline IMP
lookUpImpOrNil(id obj, SEL sel, Class cls, int behavior = 0)
{
return lookUpImpOrForward(obj, sel, cls, behavior | LOOKUP_CACHE | LOOKUP_NIL);
}
extern IMP lookUpImpOrForwardTryCache(id obj, SEL, Class cls, int behavior = 0);
extern IMP lookUpImpOrNilTryCache(id obj, SEL, Class cls, int behavior = 0);
extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
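behavior stays a plain bitmask under the renumbering; a hedged usage sketch:
// Consult the method cache first, run +initialize and resolvers if
// needed, and return nil rather than the forwarding IMP on a miss.
IMP imp = lookUpImpOrNilTryCache(obj, sel, cls,
                                 LOOKUP_INITIALIZE | LOOKUP_RESOLVER);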
struct IMPAndSEL {
IMP imp;
SEL sel;
};
extern IMPAndSEL _method_getImplementationAndName(Method m);
extern BOOL class_respondsToSelector_inst(id inst, SEL sel, Class cls);
extern Class class_initialize(Class cls, id inst);
@ -775,18 +868,18 @@ __attribute__((aligned(1))) typedef int16_t unaligned_int16_t;
// Global operator new and delete. We must not use any app overrides.
// This ALSO REQUIRES each of these be in libobjc's unexported symbol list.
#if __cplusplus
#if __cplusplus && !defined(TEST_OVERRIDES_NEW)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winline-new-delete"
#include <new>
inline void* operator new(std::size_t size) throw (std::bad_alloc) { return malloc(size); }
inline void* operator new[](std::size_t size) throw (std::bad_alloc) { return malloc(size); }
inline void* operator new(std::size_t size, const std::nothrow_t&) throw() { return malloc(size); }
inline void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return malloc(size); }
inline void operator delete(void* p) throw() { free(p); }
inline void operator delete[](void* p) throw() { free(p); }
inline void operator delete(void* p, const std::nothrow_t&) throw() { free(p); }
inline void operator delete[](void* p, const std::nothrow_t&) throw() { free(p); }
inline void* operator new(std::size_t size) { return malloc(size); }
inline void* operator new[](std::size_t size) { return malloc(size); }
inline void* operator new(std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); }
inline void* operator new[](std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); }
inline void operator delete(void* p) noexcept(true) { free(p); }
inline void operator delete[](void* p) noexcept(true) { free(p); }
inline void operator delete(void* p, const std::nothrow_t&) noexcept(true) { free(p); }
inline void operator delete[](void* p, const std::nothrow_t&) noexcept(true) { free(p); }
#pragma clang diagnostic pop
#endif
@ -971,7 +1064,7 @@ class ChainedHookFunction {
std::atomic<Fn> hook{nil};
public:
ChainedHookFunction(Fn f) : hook{f} { };
constexpr ChainedHookFunction(Fn f) : hook{f} { };
Fn get() {
return hook.load(std::memory_order_acquire);
@ -990,10 +1083,10 @@ public:
// A small vector for use as a global variable. Only supports appending and
// iteration. Stores a single element inline, and multiple elements in a heap
// iteration. Stores up to N elements inline, and multiple elements in a heap
// allocation. There is no attempt to amortize reallocation cost; this is
// intended to be used in situation where zero or one element is common, two
// might happen, and three or more is very rare.
// intended to be used in situations where a small number of elements is
// common, more might happen, and significantly more is very rare.
//
// This does not clean up its allocation, and thus cannot be used as a local
// variable or member of something with limited lifetime.
@ -1006,7 +1099,7 @@ protected:
unsigned count{0};
union {
T inlineElements[InlineCount];
T *elements;
T *elements{nullptr};
};
public:

View File

@ -1,3 +0,0 @@
#define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(obj)
#define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW()

View File

@ -60,6 +60,12 @@
#define __ptrauth_swift_value_witness_function_pointer(__key)
#endif
// Workaround <rdar://problem/64531063> Definitions of ptrauth_sign_unauthenticated and friends generate unused variables warnings
#if __has_feature(ptrauth_calls)
#define UNUSED_WITHOUT_PTRAUTH
#else
#define UNUSED_WITHOUT_PTRAUTH __unused
#endif
#if __has_feature(ptrauth_calls)
@ -76,5 +82,123 @@ using MethodListIMP = IMP;
#endif
// A struct that wraps a pointer using the provided template.
// The provided Auth parameter is used to sign and authenticate
// the pointer as it is read and written.
template<typename T, typename Auth>
struct WrappedPtr {
private:
T *ptr;
public:
WrappedPtr(T *p) {
*this = p;
}
WrappedPtr(const WrappedPtr<T, Auth> &p) {
*this = p;
}
WrappedPtr<T, Auth> &operator =(T *p) {
ptr = Auth::sign(p, &ptr);
return *this;
}
WrappedPtr<T, Auth> &operator =(const WrappedPtr<T, Auth> &p) {
*this = (T *)p;
return *this;
}
operator T*() const { return get(); }
T *operator->() const { return get(); }
T *get() const { return Auth::auth(ptr, &ptr); }
// When asserts are enabled, ensure that we can read a byte from
// the underlying pointer. This can be used to catch ptrauth
// errors early for easier debugging.
void validate() const {
#if !NDEBUG
char *p = (char *)get();
char dummy;
memset_s(&dummy, 1, *p, 1);
ASSERT(dummy == *p);
#endif
}
};
// A "ptrauth" struct that just passes pointers through unchanged.
struct PtrauthRaw {
template <typename T>
static T *sign(T *ptr, __unused const void *address) {
return ptr;
}
template <typename T>
static T *auth(T *ptr, __unused const void *address) {
return ptr;
}
};
// A ptrauth struct that stores pointers raw, and strips ptrauth
// when reading.
struct PtrauthStrip {
template <typename T>
static T *sign(T *ptr, __unused const void *address) {
return ptr;
}
template <typename T>
static T *auth(T *ptr, __unused const void *address) {
return ptrauth_strip(ptr, ptrauth_key_process_dependent_data);
}
};
// A ptrauth struct that signs and authenticates pointers using the
// DB key with the given discriminator and address diversification.
template <unsigned discriminator>
struct Ptrauth {
template <typename T>
static T *sign(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) {
if (!ptr)
return nullptr;
return ptrauth_sign_unauthenticated(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator));
}
template <typename T>
static T *auth(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) {
if (!ptr)
return nullptr;
return ptrauth_auth_data(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator));
}
};
// A template that produces a WrappedPtr to the given type using a
// plain unauthenticated pointer.
template <typename T> using RawPtr = WrappedPtr<T, PtrauthRaw>;
#if __has_feature(ptrauth_calls)
// Get a ptrauth type that uses a string discriminator.
#if __BUILDING_OBJCDT__
#define PTRAUTH_STR(name) PtrauthStrip
#else
#define PTRAUTH_STR(name) Ptrauth<ptrauth_string_discriminator(#name)>
#endif
// When ptrauth is available, declare a template that wraps a type
// in a WrappedPtr that uses an authenticated pointer using the
// process-dependent data key, address diversification, and a
// discriminator based on the name passed in.
//
// When ptrauth is not available, equivalent to RawPtr.
#define DECLARE_AUTHED_PTR_TEMPLATE(name) \
template <typename T> using name ## _authed_ptr \
= WrappedPtr<T, PTRAUTH_STR(name)>;
#else
#define PTRAUTH_STR(name) PtrauthRaw
#define DECLARE_AUTHED_PTR_TEMPLATE(name) \
template <typename T> using name ## _authed_ptr = RawPtr<T>;
#endif
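Putting the pieces together (hedged example; method_list_t stands in for any pointee type):
DECLARE_AUTHED_PTR_TEMPLATE(method_list_t)
struct example_t {
    // Signed with the process-dependent data key, diversified by the
    // field's own address plus the string discriminator "method_list_t";
    // loads authenticate, stores re-sign. On non-ptrauth targets (and in
    // objcdt builds, via PtrauthStrip) this degrades to a plain pointer.
    method_list_t_authed_ptr<struct method_list_t> methods;
};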
// _OBJC_PTRAUTH_H_
#endif

View File

@ -35,7 +35,7 @@ __BEGIN_DECLS
extern void _objc_associations_init();
extern void _object_set_associative_reference(id object, const void *key, id value, uintptr_t policy);
extern id _object_get_associative_reference(id object, const void *key);
extern void _object_remove_assocations(id object);
extern void _object_remove_assocations(id object, bool deallocating);
__END_DECLS

View File

@ -38,7 +38,8 @@ enum {
OBJC_ASSOCIATION_SETTER_COPY = 3, // NOTE: both bits are set, so we can simply test 1 bit in releaseValue below.
OBJC_ASSOCIATION_GETTER_READ = (0 << 8),
OBJC_ASSOCIATION_GETTER_RETAIN = (1 << 8),
OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8)
OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8),
OBJC_ASSOCIATION_SYSTEM_OBJECT = _OBJC_ASSOCIATION_SYSTEM_OBJECT, // 1 << 16
};
spinlock_t AssociationsManagerLock;
@ -172,6 +173,7 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_
// retain the new value (if any) outside the lock.
association.acquireValue();
bool isFirstAssociation = false;
{
AssociationsManager manager;
AssociationsHashMap &associations(manager.get());
@ -180,7 +182,7 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_
auto refs_result = associations.try_emplace(disguised, ObjectAssociationMap{});
if (refs_result.second) {
/* it's the first association we make */
object->setHasAssociatedObjects();
isFirstAssociation = true;
}
/* establish or replace the association */
@ -206,6 +208,13 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_
}
}
// Call setHasAssociatedObjects outside the lock, since this
// will call the object's _noteAssociatedObjects method if it
// has one, and this may trigger +initialize which might do
// arbitrary stuff, including setting more associated objects.
if (isFirstAssociation)
object->setHasAssociatedObjects();
// release the old value (outside of the lock).
association.releaseHeldValue();
}
@ -215,7 +224,7 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_
// raw isa objects (such as OS Objects) that can't track
// whether they have associated objects.
void
_object_remove_assocations(id object)
_object_remove_assocations(id object, bool deallocating)
{
ObjectAssociationMap refs{};
@ -225,12 +234,36 @@ _object_remove_assocations(id object)
AssociationsHashMap::iterator i = associations.find((objc_object *)object);
if (i != associations.end()) {
refs.swap(i->second);
// If we are not deallocating, then SYSTEM_OBJECT associations are preserved.
bool didReInsert = false;
if (!deallocating) {
for (auto &ref: refs) {
if (ref.second.policy() & OBJC_ASSOCIATION_SYSTEM_OBJECT) {
i->second.insert(ref);
didReInsert = true;
}
}
}
if (!didReInsert)
associations.erase(i);
}
}
// Associations to be released after the normal ones.
SmallVector<ObjcAssociation *, 4> laterRefs;
// release everything (outside of the lock).
for (auto &i: refs) {
if (i.second.policy() & OBJC_ASSOCIATION_SYSTEM_OBJECT) {
// If we are not deallocating, then RELEASE_LATER associations don't get released.
if (deallocating)
laterRefs.append(&i.second);
} else {
i.second.releaseHeldValue();
}
}
for (auto *later: laterRefs) {
later->releaseHeldValue();
}
}
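A usage sketch of the new policy bit (the same pattern the tests in this commit use):
// Survives objc_removeAssociatedObjects(); torn down only on dealloc,
// and released only after the ordinary associations are gone.
objc_setAssociatedObject(obj, &key, value,
    (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN |
                             _OBJC_ASSOCIATION_SYSTEM_OBJECT));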

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -33,6 +33,7 @@
* Imports.
**********************************************************************/
#include <os/feature_private.h> // os_feature_enabled_simple()
#include "objc-private.h"
#include "objc-loadmethod.h"
#include "objc-file.h"
@ -87,6 +88,9 @@ const option_t Settings[] = {
#undef OPTION
};
namespace objc {
int PageCountWarning = 50; // Default value if the environment variable is not set
}
// objc's key for pthread_getspecific
#if SUPPORT_DIRECT_THREAD_KEYS
@ -338,6 +342,22 @@ void removeHeader(header_info *hi)
#endif
}
/***********************************************************************
* SetPageCountWarning
* Convert environment variable value to integer value.
* If the value is valid, set the global PageCountWarning value.
**********************************************************************/
void SetPageCountWarning(const char* envvar) {
if (envvar) {
long result = strtol(envvar, NULL, 10);
if (result <= INT_MAX && result >= -1) {
int32_t var = (int32_t)result;
if (var != 0) { // 0 is not a valid value for the env var
objc::PageCountWarning = var;
}
}
}
}
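Accepted inputs, per the range checks above (reading -1 as "disable" is an assumption):
SetPageCountWarning("32");    // objc::PageCountWarning = 32
SetPageCountWarning("-1");    // accepted; presumably disables the warning
SetPageCountWarning("0");     // ignored: 0 is not a valid value
SetPageCountWarning("junk");  // strtol() yields 0, so also ignored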
/***********************************************************************
* environ_init
@ -352,6 +372,13 @@ void environ_init(void)
return;
}
// Turn off autorelease LRU coalescing by default for apps linked against
// older SDKs. LRU coalescing can reorder releases and certain older apps
// are accidentally relying on the ordering.
// rdar://problem/63886091
if (!dyld_program_sdk_at_least(dyld_fall_2020_os_versions))
DisableAutoreleaseCoalescingLRU = true;
bool PrintHelp = false;
bool PrintOptions = false;
bool maybeMallocDebugging = false;
@ -376,6 +403,11 @@ void environ_init(void)
continue;
}
if (0 == strncmp(*p, "OBJC_DEBUG_POOL_DEPTH=", 22)) {
SetPageCountWarning(*p + 22);
continue;
}
const char *value = strchr(*p, '=');
if (!*value) continue;
value++;
@ -409,6 +441,10 @@ void environ_init(void)
}
}
if (!os_feature_enabled_simple(objc4, preoptimizedCaches, true)) {
DisablePreoptCaches = true;
}
// Print OBJC_HELP and OBJC_PRINT_OPTIONS output.
if (PrintHelp || PrintOptions) {
if (PrintHelp) {
@ -649,31 +685,25 @@ objc_getAssociatedObject(id object, const void *key)
return _object_get_associative_reference(object, key);
}
static void
_base_objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy)
{
_object_set_associative_reference(object, key, value, policy);
}
static ChainedHookFunction<objc_hook_setAssociatedObject> SetAssocHook{_base_objc_setAssociatedObject};
typedef void (*objc_hook_setAssociatedObject)(id _Nonnull object, const void * _Nonnull key,
id _Nullable value, objc_AssociationPolicy policy);
void
objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue,
objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue) {
SetAssocHook.set(newValue, outOldValue);
// See objc_object::setHasAssociatedObjects() for a replacement
}
void
objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy)
{
SetAssocHook.get()(object, key, value, policy);
_object_set_associative_reference(object, key, value, policy);
}
void objc_removeAssociatedObjects(id object)
{
if (object && object->hasAssociatedObjects()) {
_object_remove_assocations(object);
_object_remove_assocations(object, /*deallocating*/false);
}
}

View File

@ -33,11 +33,6 @@
#include "objc-private.h"
#include "objc-sel-set.h"
#if SUPPORT_PREOPT
#include <objc-shared-cache.h>
static const objc_selopt_t *builtins = NULL;
#endif
__BEGIN_DECLS
static size_t SelrefCount = 0;
@ -55,10 +50,6 @@ static SEL _objc_search_builtins(const char *key)
if (!key) return (SEL)0;
if ('\0' == *key) return (SEL)_objc_empty_selector;
#if SUPPORT_PREOPT
if (builtins) return (SEL)builtins->get(key);
#endif
return (SEL)0;
}
@ -151,10 +142,6 @@ void sel_init(size_t selrefCount)
// save this value for later
SelrefCount = selrefCount;
#if SUPPORT_PREOPT
builtins = preoptimizedSelectors();
#endif
// Register selectors used by libobjc
mutex_locker_t lock(selLock);

View File

@ -120,7 +120,7 @@ struct __objc_sel_set *__objc_sel_set_create(size_t selrefs) {
sset->_count = 0;
// heuristic to convert executable's selrefs count to table size
#if TARGET_OS_IPHONE && !TARGET_OS_IOSMAC
#if TARGET_OS_IPHONE && !TARGET_OS_MACCATALYST
for (idx = 0; __objc_sel_set_capacities[idx] < selrefs; idx++);
if (idx > 0 && selrefs < 1536) idx--;
#else

View File

@ -2,7 +2,12 @@
#include <mach/vm_param.h>
#if __LP64__
#if __arm64e__
// 0x6AE1
# define PTR(x) .quad x@AUTH(da, 27361, addr)
#else
# define PTR(x) .quad x
#endif
#else
# define PTR(x) .long x
#endif
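The decimal constant in the @AUTH operand matches the hex in the comment, and it is the same isa discriminator the hand-written class structures in the tests further down use for SIGNED_ISA:
static_assert(27361 == 0x6AE1, "isa discriminator comment matches");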

View File

@ -24,15 +24,8 @@
#if __OBJC2__
#include "objc-private.h"
#include "objc-cache.h"
#include "DenseMapExtras.h"
#if SUPPORT_PREOPT
static const objc_selopt_t *builtins = NULL;
static bool useDyldSelectorLookup = false;
#endif
static objc::ExplicitInitDenseSet<const char *> namedSelectors;
static SEL search_builtins(const char *key);
@ -44,32 +37,13 @@ static SEL search_builtins(const char *key);
void sel_init(size_t selrefCount)
{
#if SUPPORT_PREOPT
// If dyld finds a known shared cache selector, then it must be also looking
// in the shared cache table.
if (_dyld_get_objc_selector("retain") != nil)
useDyldSelectorLookup = true;
else
builtins = preoptimizedSelectors();
if (PrintPreopt && useDyldSelectorLookup) {
if (PrintPreopt) {
_objc_inform("PREOPTIMIZATION: using dyld selector opt");
}
if (PrintPreopt && builtins) {
uint32_t occupied = builtins->occupied;
uint32_t capacity = builtins->capacity;
_objc_inform("PREOPTIMIZATION: using selopt at %p", builtins);
_objc_inform("PREOPTIMIZATION: %u selectors", occupied);
_objc_inform("PREOPTIMIZATION: %u/%u (%u%%) hash table occupancy",
occupied, capacity,
(unsigned)(occupied/(double)capacity*100));
}
namedSelectors.init(useDyldSelectorLookup ? 0 : (unsigned)selrefCount);
#else
namedSelectors.init((unsigned)selrefCount);
#endif
namedSelectors.init((unsigned)selrefCount);
// Register selectors used by libobjc
mutex_locker_t lock(selLock);
@ -93,6 +67,16 @@ const char *sel_getName(SEL sel)
}
unsigned long sel_hash(SEL sel)
{
unsigned long selAddr = (unsigned long)sel;
#if CONFIG_USE_PREOPT_CACHES
selAddr ^= (selAddr >> 7);
#endif
return selAddr;
}
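SELs are pointers into densely packed, aligned string tables, so their low bits carry little entropy; folding bit 7 and up back into the low bits spreads them across power-of-two preopt cache buckets. With hypothetical addresses:
unsigned long a = 0x1f4008000;  // two hypothetical SEL addresses,
unsigned long b = 0x1f4008080;  // 128 bytes apart
// a and b agree in their low 7 bits, so a small power-of-two cache
// would map them to one bucket; after addr ^= addr >> 7 they differ.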
BOOL sel_isMapped(SEL sel)
{
if (!sel) return NO;
@ -110,17 +94,8 @@ BOOL sel_isMapped(SEL sel)
static SEL search_builtins(const char *name)
{
#if SUPPORT_PREOPT
if (builtins) {
SEL result = 0;
if ((result = (SEL)builtins->get(name)))
return result;
if ((result = (SEL)_dyld_get_objc_selector(name)))
return result;
} else if (useDyldSelectorLookup) {
if (SEL result = (SEL)_dyld_get_objc_selector(name))
return result;
}
#endif
return nil;
}

View File

@ -123,9 +123,15 @@ struct weak_table_t {
uintptr_t max_hash_displacement;
};
enum WeakRegisterDeallocatingOptions {
ReturnNilIfDeallocating,
CrashIfDeallocating,
DontCheckDeallocating
};
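A hedged sketch of which caller wants which option (the mapping is inferred from the names, not spelled out here):
// user-visible weak stores that must not race dealloc -> CrashIfDeallocating
// the "...OrNil" store variants                       -> ReturnNilIfDeallocating
// internal moves where viability was already checked  -> DontCheckDeallocating
weak_register_no_lock(&table, referent, referrer,
                      crashIfDeallocating ? CrashIfDeallocating
                                          : ReturnNilIfDeallocating);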
/// Adds an (object, weak pointer) pair to the weak table.
id weak_register_no_lock(weak_table_t *weak_table, id referent,
id *referrer, bool crashIfDeallocating);
id *referrer, WeakRegisterDeallocatingOptions deallocatingOptions);
/// Removes an (object, weak pointer) pair from the weak table.
void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer);

View File

@ -389,23 +389,27 @@ weak_unregister_no_lock(weak_table_t *weak_table, id referent_id,
*/
id
weak_register_no_lock(weak_table_t *weak_table, id referent_id,
id *referrer_id, bool crashIfDeallocating)
id *referrer_id, WeakRegisterDeallocatingOptions deallocatingOptions)
{
objc_object *referent = (objc_object *)referent_id;
objc_object **referrer = (objc_object **)referrer_id;
if (!referent || referent->isTaggedPointer()) return referent_id;
if (referent->isTaggedPointerOrNil()) return referent_id;
// ensure that the referenced object is viable
if (deallocatingOptions == ReturnNilIfDeallocating ||
deallocatingOptions == CrashIfDeallocating) {
bool deallocating;
if (!referent->ISA()->hasCustomRR()) {
deallocating = referent->rootIsDeallocating();
}
else {
BOOL (*allowsWeakReference)(objc_object *, SEL) =
(BOOL(*)(objc_object *, SEL))
object_getMethodImplementation((id)referent,
@selector(allowsWeakReference));
// Use lookUpImpOrForward so we can avoid the assert in
// class_getInstanceMethod, since we intentionally make this
// callout with the lock held.
auto allowsWeakReference = (BOOL(*)(objc_object *, SEL))
lookUpImpOrForwardTryCache((id)referent, @selector(allowsWeakReference),
referent->getIsa());
if ((IMP)allowsWeakReference == _objc_msgForward) {
return nil;
}
@ -414,7 +418,7 @@ weak_register_no_lock(weak_table_t *weak_table, id referent_id,
}
if (deallocating) {
if (crashIfDeallocating) {
if (deallocatingOptions == CrashIfDeallocating) {
_objc_fatal("Cannot form weak reference to instance (%p) of "
"class %s. It is possible that this object was "
"over-released, or is in the process of deallocation.",
@ -423,6 +427,7 @@ weak_register_no_lock(weak_table_t *weak_table, id referent_id,
return nil;
}
}
}
// now remember it and where it is being stored
weak_entry_t *entry;

View File

@ -67,7 +67,7 @@ typedef id _Nullable (*IMP)(id _Nonnull, SEL _Nonnull, ...);
# endif
#else
// __OBJC_BOOL_IS_BOOL not set.
# if TARGET_OS_OSX || TARGET_OS_IOSMAC || ((TARGET_OS_IOS || TARGET_OS_BRIDGE) && !__LP64__ && !__ARM_ARCH_7K)
# if TARGET_OS_OSX || TARGET_OS_MACCATALYST || ((TARGET_OS_IOS || TARGET_OS_BRIDGE) && !__LP64__ && !__ARM_ARCH_7K)
# define OBJC_BOOL_IS_BOOL 0
# else
# define OBJC_BOOL_IS_BOOL 1
@ -180,8 +180,7 @@ OBJC_EXPORT const char * _Nonnull object_getClassName(id _Nullable obj)
* @note In a garbage-collected environment, the memory is scanned conservatively.
*/
OBJC_EXPORT void * _Nullable object_getIndexedIvars(id _Nullable obj)
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0)
OBJC_ARC_UNAVAILABLE;
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0);
/**
* Identifies a selector as being valid or invalid.

View File

@ -1767,43 +1767,6 @@ OBJC_EXPORT void objc_setHook_getClass(objc_hook_getClass _Nonnull newValue,
OBJC_AVAILABLE(10.14.4, 12.2, 12.2, 5.2, 3.2);
#endif
/**
* Function type for a hook that assists objc_setAssociatedObject().
*
* @param object The source object for the association.
* @param key The key for the association.
* @param value The value to associate with the key key for object. Pass nil to clear an existing association.
* @param policy The policy for the association. For possible values, see Associative Object Behaviors.
*
* @see objc_setAssociatedObject
* @see objc_setHook_setAssociatedObject
*/
typedef void (*objc_hook_setAssociatedObject)(id _Nonnull object, const void * _Nonnull key,
id _Nullable value, objc_AssociationPolicy policy);
/**
* Install a hook for objc_setAssociatedObject().
*
* @param newValue The hook function to install.
* @param outOldValue The address of a function pointer variable. On return,
* the old hook function is stored in the variable.
*
* @note The store to *outOldValue is thread-safe: the variable will be
* updated before objc_setAssociatedObject() calls your new hook to read it,
* even if your new hook is called from another thread before this
* setter completes.
* @note Your hook should always call the previous hook.
*
* @see objc_setAssociatedObject
* @see objc_hook_setAssociatedObject
*/
#if !(TARGET_OS_OSX && __i386__)
#define OBJC_SETASSOCIATEDOBJECTHOOK_DEFINED 1
OBJC_EXPORT void objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue,
objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue)
OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
#endif
/**
* Function type for a function that is called when an image is loaded.
*
@ -1831,6 +1794,38 @@ typedef void (*objc_func_loadImage)(const struct mach_header * _Nonnull header);
OBJC_EXPORT void objc_addLoadImageFunc(objc_func_loadImage _Nonnull func)
OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0);
/**
* Function type for a hook that provides a name for lazily named classes.
*
* @param cls The class to generate a name for.
* @return The name of the class, or NULL if the name isn't known or can't be generated.
*
* @see objc_setHook_lazyClassNamer
*/
typedef const char * _Nullable (*objc_hook_lazyClassNamer)(_Nonnull Class cls);
/**
* Install a hook to provide a name for lazily-named classes.
*
* @param newValue The hook function to install.
* @param outOldValue The address of a function pointer variable. On return,
* the old hook function is stored in the variable.
*
* @note The store to *outOldValue is thread-safe: the variable will be
* updated before objc_getClass() calls your new hook to read it,
* even if your new hook is called from another thread before this
* setter completes.
* @note Your hook must call the previous hook for class names
* that you do not recognize.
*/
#if !(TARGET_OS_OSX && __i386__)
#define OBJC_SETHOOK_LAZYCLASSNAMER_DEFINED 1
OBJC_EXPORT
void objc_setHook_lazyClassNamer(_Nonnull objc_hook_lazyClassNamer newValue,
_Nonnull objc_hook_lazyClassNamer * _Nonnull outOldValue)
OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0);
#endif
/**
* Callback from Objective-C to Swift to perform Swift class initialization.
*/

Binary file not shown.

View File

@ -3,6 +3,8 @@
#include "test.h"
#include <Foundation/NSObject.h>
#include <objc/runtime.h>
#include <objc/objc-internal.h>
#include <Block.h>
static int values;
static int supers;
@ -85,6 +87,100 @@ static const char *key = "key";
}
@end
@interface Sub59318867: NSObject @end
@implementation Sub59318867
+ (void)initialize {
objc_setAssociatedObject(self, &key, self, OBJC_ASSOCIATION_ASSIGN);
}
@end
@interface CallOnDealloc: NSObject @end
@implementation CallOnDealloc {
void (^_block)(void);
}
- (id)initWithBlock: (void (^)(void))block {
_block = (__bridge id)Block_copy((__bridge void *)block);
return self;
}
- (void)dealloc {
_block();
_Block_release((__bridge void *)_block);
SUPER_DEALLOC();
}
@end
void TestReleaseLater(void) {
int otherObjsCount = 100;
char keys1[otherObjsCount];
char keys2[otherObjsCount];
char laterKey;
__block int normalDeallocs = 0;
__block int laterDeallocs = 0;
{
id target = [NSObject new];
for (int i = 0; i < otherObjsCount; i++) {
id value = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }];
objc_setAssociatedObject(target, keys1 + i, value, OBJC_ASSOCIATION_RETAIN);
RELEASE_VALUE(value);
}
{
id laterValue = [[CallOnDealloc alloc] initWithBlock: ^{
testassertequal(laterDeallocs, 0);
testassertequal(normalDeallocs, otherObjsCount * 2);
laterDeallocs++;
}];
objc_setAssociatedObject(target, &laterKey, laterValue, (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN | _OBJC_ASSOCIATION_SYSTEM_OBJECT));
RELEASE_VALUE(laterValue);
}
for (int i = 0; i < otherObjsCount; i++) {
id value = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }];
objc_setAssociatedObject(target, keys2 + i, value, OBJC_ASSOCIATION_RETAIN);
RELEASE_VALUE(value);
}
RELEASE_VALUE(target);
}
testassertequal(laterDeallocs, 1);
testassertequal(normalDeallocs, otherObjsCount * 2);
}
void TestReleaseLaterRemoveAssociations(void) {
char normalKey;
char laterKey;
__block int normalDeallocs = 0;
__block int laterDeallocs = 0;
@autoreleasepool {
id target = [NSObject new];
{
id normalValue = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }];
id laterValue = [[CallOnDealloc alloc] initWithBlock: ^{ laterDeallocs++; }];
objc_setAssociatedObject(target, &normalKey, normalValue, OBJC_ASSOCIATION_RETAIN);
objc_setAssociatedObject(target, &laterKey, laterValue, (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN | _OBJC_ASSOCIATION_SYSTEM_OBJECT));
RELEASE_VALUE(normalValue);
RELEASE_VALUE(laterValue);
}
testassertequal(normalDeallocs, 0);
testassertequal(laterDeallocs, 0);
objc_removeAssociatedObjects(target);
testassertequal(normalDeallocs, 1);
testassertequal(laterDeallocs, 0);
id normalValue = objc_getAssociatedObject(target, &normalKey);
id laterValue = objc_getAssociatedObject(target, &laterKey);
testassert(!normalValue);
testassert(laterValue);
RELEASE_VALUE(target);
}
testassertequal(normalDeallocs, 1);
testassertequal(laterDeallocs, 1);
}
int main()
{
@ -123,5 +219,13 @@ int main()
objc_setAssociatedObject(nil, &key, nil, OBJC_ASSOCIATION_ASSIGN);
#pragma clang diagnostic pop
// rdar://problem/59318867 Make sure we don't reenter the association lock
// when setting an associated object on an uninitialized class.
Class Sub59318867Local = objc_getClass("Sub59318867");
objc_setAssociatedObject(Sub59318867Local, &key, Sub59318867Local, OBJC_ASSOCIATION_ASSIGN);
TestReleaseLater();
TestReleaseLaterRemoveAssociations();
succeed(__FILE__);
}

View File

@ -1,14 +0,0 @@
// Run test badPool as if it were built with an old SDK.
// TEST_CONFIG MEM=mrc OS=iphoneos,iphonesimulator,appletvos,appletvsimulator
// TEST_CRASHES
// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 9.0
/*
TEST_RUN_OUTPUT
objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
OK: badPool.m
END
*/
#include "badPool.m"

18
test/badPoolCompat-ios.m Normal file
View File

@ -0,0 +1,18 @@
// Run test badPool as if it were built with an old SDK.
// TEST_CONFIG MEM=mrc OS=iphoneos,iphonesimulator ARCH=x86_64,arm64
// TEST_CRASHES
// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker ios -Xlinker 9.0 -Xlinker 9.0 -miphoneos-version-min=9.0
/*
TEST_BUILD_OUTPUT
ld: warning: passed two min versions.*for platform.*
END
TEST_RUN_OUTPUT
objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
OK: badPool.m
END
*/
#include "badPool.m"

View File

@ -1,10 +1,14 @@
// Run test badPool as if it were built with an old SDK.
// TEST_CONFIG MEM=mrc OS=macosx
// TEST_CONFIG MEM=mrc OS=macosx ARCH=x86_64
// TEST_CRASHES
// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 10.11
// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker macos -Xlinker 10.11 -Xlinker 10.11 -mmacosx-version-min=10.11
/*
TEST_BUILD_OUTPUT
ld: warning: passed two min versions.*for platform.*
END
TEST_RUN_OUTPUT
objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
OK: badPool.m

18
test/badPoolCompat-tvos.m Normal file
View File

@ -0,0 +1,18 @@
// Run test badPool as if it were built with an old SDK.
// TEST_CONFIG MEM=mrc OS=appletvos,appletvsimulator ARCH=x86_64,arm64
// TEST_CRASHES
// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker tvos -Xlinker 9.0 -Xlinker 9.0 -mtvos-version-min=9.0
/*
TEST_BUILD_OUTPUT
ld: warning: passed two min versions.*for platform.*
END
TEST_RUN_OUTPUT
objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
OK: badPool.m
END
*/
#include "badPool.m"

View File

@ -2,9 +2,13 @@
// TEST_CONFIG MEM=mrc OS=watchos,watchsimulator
// TEST_CRASHES
// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 2.0
// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker watchos -Xlinker 2.0 -Xlinker 2.0 -mwatchos-version-min=2.0
/*
TEST_BUILD_OUTPUT
ld: warning: passed two min versions.*for platform.*
END
TEST_RUN_OUTPUT
objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .*
OK: badPool.m

View File

@ -26,7 +26,7 @@ int main()
// Create a cycle in a superclass chain (Sub->supercls == Sub)
// then attempt to walk that chain. Runtime should halt eventually.
_objc_flush_caches(supercls);
((Class *)(__bridge void *)subcls)[1] = subcls;
((Class __ptrauth_objc_super_pointer *)(__bridge void *)subcls)[1] = subcls;
#ifdef CACHE_FLUSH
_objc_flush_caches(supercls);
#else

View File

@ -1,13 +1,4 @@
// TEST_CONFIG MEM=mrc
/*
TEST_RUN_OUTPUT
objc\[\d+\]: Deallocator object 0x[0-9a-fA-F]+ overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug
OK: bigrc.m
OR
no overrelease enforcement
OK: bigrc.m
END
*/
#include "test.h"
#include "testroot.i"
@ -20,37 +11,15 @@ static size_t LOTS;
-(void)dealloc
{
id o = self;
size_t rc = 1;
testprintf("Retain a lot during dealloc\n");
testprintf("Retain/release during dealloc\n");
testassert(rc == 1);
testassert([o retainCount] == rc);
do {
testassertequal([o retainCount], 0);
[o retain];
if (rc % 0x100000 == 0) testprintf("%zx/%zx ++\n", rc, LOTS);
} while (++rc < LOTS);
testassert([o retainCount] == rc);
do {
[o release];
if (rc % 0x100000 == 0) testprintf("%zx/%zx --\n", rc, LOTS);
} while (--rc > 1);
testassert(rc == 1);
testassert([o retainCount] == rc);
testprintf("Overrelease during dealloc\n");
// Not all architectures enforce this.
#if !SUPPORT_NONPOINTER_ISA
testwarn("no overrelease enforcement");
fprintf(stderr, "no overrelease enforcement\n");
#endif
testassertequal([o retainCount], 0);
[o release];
testassertequal([o retainCount], 0);
[super dealloc];
}

View File

@ -5,7 +5,11 @@
#include <objc/objc.h>
#if TARGET_OS_OSX
# if __x86_64__
# define RealBool 0
# else
# define RealBool 1
# endif
#elif TARGET_OS_IOS || TARGET_OS_BRIDGE
# if (__arm__ && !__armv7k__) || __i386__
# define RealBool 0

View File

@ -0,0 +1,44 @@
// TEST_CFLAGS -framework Foundation
/*
TEST_RUN_OUTPUT
foo
bar
bar
foo
END
*/
// NOTE: This test won't catch problems when running against a root, so it's of
// limited utility, but it would at least catch things when testing against the
// shared cache.
#include <Foundation/Foundation.h>
#include <objc/runtime.h>
@interface NSBlock: NSObject @end
// NSBlock is a conveniently accessible superclass that (currently) has a constant cache.
@interface MyBlock: NSBlock
+(void)foo;
+(void)bar;
@end
@implementation MyBlock
+(void)foo {
printf("foo\n");
}
+(void)bar {
printf("bar\n");
}
@end
int main() {
[MyBlock foo];
[MyBlock bar];
Method m1 = class_getClassMethod([MyBlock class], @selector(foo));
Method m2 = class_getClassMethod([MyBlock class], @selector(bar));
method_exchangeImplementations(m1, m2);
[MyBlock foo];
[MyBlock bar];
}

View File

@ -0,0 +1,142 @@
//TEST_CONFIG MEM=mrc ARCH=x86_64,ARM64,ARM64e
//TEST_ENV OBJC_DISABLE_AUTORELEASE_COALESCING=NO OBJC_DISABLE_AUTORELEASE_COALESCING_LRU=NO
#include "test.h"
#import <Foundation/NSObject.h>
#include <os/feature_private.h>
@interface Counter: NSObject {
@public
int retains;
int releases;
int autoreleases;
}
@end
@implementation Counter
- (id)retain {
retains++;
return [super retain];
}
- (oneway void)release {
releases++;
[super release];
}
- (id)autorelease {
autoreleases++;
return [super autorelease];
}
- (void)dealloc {
testprintf("%p dealloc\n", self);
[super dealloc];
}
@end
// Create a number of objects, autoreleasing each one a number of times in a
// round-robin fashion. Verify that each object gets sent retain, release, and
// autorelease the correct number of times. Verify that the gap between
// autoreleasepool pointers is the given number of objects. Note: this will not
// work when the pool hits a page boundary, so be sure to stay under that limit.
void test(int objCount, int autoreleaseCount, int expectedGap) {
testprintf("Testing %d objects, %d autoreleases, expecting gap of %d\n",
objCount, autoreleaseCount, expectedGap);
Counter *objs[objCount];
for (int i = 0; i < objCount; i++)
objs[i] = [Counter new];
for (int j = 0; j < autoreleaseCount; j++)
for (int i = 0; i < objCount; i++)
[objs[i] retain];
for (int i = 0; i < objCount; i++) {
testassertequal(objs[i]->retains, autoreleaseCount);
testassertequal(objs[i]->releases, 0);
testassertequal(objs[i]->autoreleases, 0);
}
void *outer = objc_autoreleasePoolPush();
uintptr_t outerAddr = (uintptr_t)outer;
for (int j = 0; j < autoreleaseCount; j++)
for (int i = 0; i < objCount; i++)
[objs[i] autorelease];
for (int i = 0; i < objCount; i++) {
testassertequal(objs[i]->retains, autoreleaseCount);
testassertequal(objs[i]->releases, 0);
testassertequal(objs[i]->autoreleases, autoreleaseCount);
}
void *inner = objc_autoreleasePoolPush();
uintptr_t innerAddr = (uintptr_t)inner;
testprintf("outer=%p inner=%p\n", outer, inner);
// Do one more autorelease in the inner pool to make sure we correctly
// handle pool boundaries.
for (int i = 0; i < objCount; i++)
[[objs[i] retain] autorelease];
for (int i = 0; i < objCount; i++) {
testassertequal(objs[i]->retains, autoreleaseCount + 1);
testassertequal(objs[i]->releases, 0);
testassertequal(objs[i]->autoreleases, autoreleaseCount + 1);
}
objc_autoreleasePoolPop(inner);
for (int i = 0; i < objCount; i++) {
testassertequal(objs[i]->retains, autoreleaseCount + 1);
testassertequal(objs[i]->releases, 1);
testassertequal(objs[i]->autoreleases, autoreleaseCount + 1);
}
objc_autoreleasePoolPop(outer);
for (int i = 0; i < objCount; i++) {
testassertequal(objs[i]->retains, autoreleaseCount + 1);
testassertequal(objs[i]->releases, autoreleaseCount + 1);
testassertequal(objs[i]->autoreleases, autoreleaseCount + 1);
}
intptr_t gap = innerAddr - outerAddr;
testprintf("gap=%ld\n", gap);
testassertequal(gap, expectedGap * sizeof(id));
// Destroy our test objects.
for (int i = 0; i < objCount; i++)
[objs[i] release];
}
int main()
{
// Push a pool here so test() doesn't see a placeholder.
objc_autoreleasePoolPush();
test(1, 1, 2);
test(1, 2, 2);
test(1, 10, 2);
test(1, 100, 2);
test(1, 70000, 3);
test(2, 1, 3);
test(2, 2, 3);
test(2, 10, 3);
test(2, 100, 3);
test(2, 70000, 5);
test(3, 1, 4);
test(3, 2, 4);
test(3, 10, 4);
test(3, 100, 4);
test(3, 70000, 7);
test(4, 1, 5);
test(4, 2, 5);
test(4, 10, 5);
test(4, 100, 5);
test(4, 70000, 9);
test(5, 1, 6);
test(5, 2, 11);
succeed(__FILE__);
}
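A hedged reading of the expected gaps: each distinct coalesced pool entry costs one pointer-sized slot, plus one slot for the inner pool's boundary sentinel.
// test(1, 100, 2):   one coalesced entry + boundary             = 2 slots
// test(1, 70000, 3): the embedded repeat count saturates, so a
//                    second entry is needed (count bits are few) = 3 slots
// test(5, 1, 6):     five entries + boundary                     = 6 slots
// test(5, 2, 11):    round-robin over five objects defeats the
//                    LRU look-back window; nothing coalesces:
//                    10 entries + boundary                       = 11 slots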

View File

@ -10,6 +10,8 @@ typedef IMP __ptrauth_objc_method_list_imp MethodListIMP;
typedef IMP MethodListIMP;
#endif
EXTERN_C void _method_setImplementationRawUnsafe(Method m, IMP imp);
static int Retains;
static int Releases;
static int Autoreleases;
@ -64,7 +66,7 @@ int main(int argc __unused, char **argv)
#if SWIZZLE_AWZ
method_setImplementation(meth, (IMP)HackAllocWithZone);
#else
((MethodListIMP *)meth)[2] = (IMP)HackAllocWithZone;
_method_setImplementationRawUnsafe(meth, (IMP)HackAllocWithZone);
#endif
meth = class_getClassMethod(cls, @selector(new));
@ -72,7 +74,7 @@ int main(int argc __unused, char **argv)
#if SWIZZLE_CORE
method_setImplementation(meth, (IMP)HackPlusNew);
#else
((MethodListIMP *)meth)[2] = (IMP)HackPlusNew;
_method_setImplementationRawUnsafe(meth, (IMP)HackPlusNew);
#endif
meth = class_getClassMethod(cls, @selector(self));
@ -80,7 +82,7 @@ int main(int argc __unused, char **argv)
#if SWIZZLE_CORE
method_setImplementation(meth, (IMP)HackPlusSelf);
#else
((MethodListIMP *)meth)[2] = (IMP)HackPlusSelf;
_method_setImplementationRawUnsafe(meth, (IMP)HackPlusSelf);
#endif
meth = class_getInstanceMethod(cls, @selector(self));
@ -88,7 +90,7 @@ int main(int argc __unused, char **argv)
#if SWIZZLE_CORE
method_setImplementation(meth, (IMP)HackSelf);
#else
((MethodListIMP *)meth)[2] = (IMP)HackSelf;
_method_setImplementationRawUnsafe(meth, (IMP)HackSelf);
#endif
meth = class_getInstanceMethod(cls, @selector(release));
@ -96,25 +98,25 @@ int main(int argc __unused, char **argv)
#if SWIZZLE_RELEASE
method_setImplementation(meth, (IMP)HackRelease);
#else
((MethodListIMP *)meth)[2] = (IMP)HackRelease;
_method_setImplementationRawUnsafe(meth, (IMP)HackRelease);
#endif
// These other methods get hacked for counting purposes only
meth = class_getInstanceMethod(cls, @selector(retain));
RealRetain = (typeof(RealRetain))method_getImplementation(meth);
((MethodListIMP *)meth)[2] = (IMP)HackRetain;
_method_setImplementationRawUnsafe(meth, (IMP)HackRetain);
meth = class_getInstanceMethod(cls, @selector(autorelease));
RealAutorelease = (typeof(RealAutorelease))method_getImplementation(meth);
((MethodListIMP *)meth)[2] = (IMP)HackAutorelease;
_method_setImplementationRawUnsafe(meth, (IMP)HackAutorelease);
meth = class_getClassMethod(cls, @selector(alloc));
RealAlloc = (typeof(RealAlloc))method_getImplementation(meth);
((MethodListIMP *)meth)[2] = (IMP)HackAlloc;
_method_setImplementationRawUnsafe(meth, (IMP)HackAlloc);
meth = class_getInstanceMethod(cls, @selector(init));
((MethodListIMP *)meth)[2] = (IMP)HackInit;
_method_setImplementationRawUnsafe(meth, (IMP)HackInit);
// Verify that the swizzles occurred before +initialize by provoking it now
testassert(PlusInitializes == 0);

View File

@ -191,38 +191,31 @@ int main(int argc __unused, char **argv)
// Don't use runtime functions to do this -
// we want the runtime to think that these are NSObject's real code
{
#if __has_feature(ptrauth_calls)
typedef IMP __ptrauth_objc_method_list_imp MethodListIMP;
#else
typedef IMP MethodListIMP;
#endif
Class cls = [NSObject class];
IMP imp = class_getMethodImplementation(cls, @selector(retain));
MethodListIMP *m = (MethodListIMP *)
class_getInstanceMethod(cls, @selector(retain));
testassert(m[2] == imp); // verify Method struct is as we expect
Method m = class_getInstanceMethod(cls, @selector(retain));
testassert(method_getImplementation(m) == imp); // verify Method struct is as we expect
m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(retain));
m[2] = (IMP)HackRetain;
m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(release));
m[2] = (IMP)HackRelease;
m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(autorelease));
m[2] = (IMP)HackAutorelease;
m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(retainCount));
m[2] = (IMP)HackRetainCount;
m = (MethodListIMP *)class_getClassMethod(cls, @selector(retain));
m[2] = (IMP)HackPlusRetain;
m = (MethodListIMP *)class_getClassMethod(cls, @selector(release));
m[2] = (IMP)HackPlusRelease;
m = (MethodListIMP *)class_getClassMethod(cls, @selector(autorelease));
m[2] = (IMP)HackPlusAutorelease;
m = (MethodListIMP *)class_getClassMethod(cls, @selector(retainCount));
m[2] = (IMP)HackPlusRetainCount;
m = (MethodListIMP *)class_getClassMethod(cls, @selector(alloc));
m[2] = (IMP)HackAlloc;
m = (MethodListIMP *)class_getClassMethod(cls, @selector(allocWithZone:));
m[2] = (IMP)HackAllocWithZone;
m = class_getInstanceMethod(cls, @selector(retain));
_method_setImplementationRawUnsafe(m, (IMP)HackRetain);
m = class_getInstanceMethod(cls, @selector(release));
_method_setImplementationRawUnsafe(m, (IMP)HackRelease);
m = class_getInstanceMethod(cls, @selector(autorelease));
_method_setImplementationRawUnsafe(m, (IMP)HackAutorelease);
m = class_getInstanceMethod(cls, @selector(retainCount));
_method_setImplementationRawUnsafe(m, (IMP)HackRetainCount);
m = class_getClassMethod(cls, @selector(retain));
_method_setImplementationRawUnsafe(m, (IMP)HackPlusRetain);
m = class_getClassMethod(cls, @selector(release));
_method_setImplementationRawUnsafe(m, (IMP)HackPlusRelease);
m = class_getClassMethod(cls, @selector(autorelease));
_method_setImplementationRawUnsafe(m, (IMP)HackPlusAutorelease);
m = class_getClassMethod(cls, @selector(retainCount));
_method_setImplementationRawUnsafe(m, (IMP)HackPlusRetainCount);
m = class_getClassMethod(cls, @selector(alloc));
_method_setImplementationRawUnsafe(m, (IMP)HackAlloc);
m = class_getClassMethod(cls, @selector(allocWithZone:));
_method_setImplementationRawUnsafe(m, (IMP)HackAllocWithZone);
_objc_flush_caches(cls);

View File

@ -12,8 +12,14 @@
#if __has_feature(ptrauth_calls)
# define SIGNED_METHOD_LIST_IMP "@AUTH(ia,0,addr) "
# define SIGNED_METHOD_LIST "@AUTH(da,0xC310,addr) "
# define SIGNED_ISA "@AUTH(da, 0x6AE1, addr) "
# define SIGNED_SUPER "@AUTH(da, 0xB5AB, addr) "
#else
# define SIGNED_METHOD_LIST_IMP
# define SIGNED_METHOD_LIST
# define SIGNED_ISA
# define SIGNED_SUPER
#endif
#define str(x) #x
@ -29,7 +35,7 @@ asm(
".section __DATA,__objc_data \n"
".align 3 \n"
"_OBJC_CLASS_$_Super: \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n"
PTR "0 \n"
PTR "__objc_empty_cache \n"
PTR "0 \n"
@ -64,8 +70,8 @@ asm(
PTR "0 \n"
""
"_OBJC_METACLASS_$_Super: \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "_OBJC_CLASS_$_Super \n"
PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n"
PTR "_OBJC_CLASS_$_Super" SIGNED_SUPER "\n"
PTR "__objc_empty_cache \n"
PTR "0 \n"
PTR "L_meta_ro \n"
@ -108,9 +114,9 @@ asm(
PTR "0 \n"
PTR "L_super_name \n"
#if EVIL_SUPER
PTR "L_evil_methods \n"
PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
PTR "L_good_methods \n"
PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "L_super_ivars \n"
@ -127,9 +133,9 @@ asm(
PTR "0 \n"
PTR "L_super_name \n"
#if EVIL_SUPER_META
PTR "L_evil_methods \n"
PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
PTR "L_good_methods \n"
PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "0 \n"
@ -140,8 +146,8 @@ asm(
".section __DATA,__objc_data \n"
".align 3 \n"
"_OBJC_CLASS_$_Sub: \n"
PTR "_OBJC_METACLASS_$_Sub \n"
PTR "_OBJC_CLASS_$_Super \n"
PTR "_OBJC_METACLASS_$_Sub" SIGNED_ISA "\n"
PTR "_OBJC_CLASS_$_Super" SIGNED_SUPER "\n"
PTR "__objc_empty_cache \n"
PTR "0 \n"
PTR "L_sub_ro \n"
@ -175,8 +181,8 @@ asm(
PTR "0 \n"
""
"_OBJC_METACLASS_$_Sub: \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n"
PTR "_OBJC_METACLASS_$_Super" SIGNED_SUPER "\n"
PTR "__objc_empty_cache \n"
PTR "0 \n"
PTR "L_sub_meta_ro \n"
@ -219,9 +225,9 @@ asm(
PTR "0 \n"
PTR "L_sub_name \n"
#if EVIL_SUB
PTR "L_evil_methods \n"
PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
PTR "L_good_methods \n"
PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "L_sub_ivars \n"
@ -238,9 +244,9 @@ asm(
PTR "0 \n"
PTR "L_sub_name \n"
#if EVIL_SUB_META
PTR "L_evil_methods \n"
PTR "L_evil_methods" SIGNED_METHOD_LIST "\n"
#else
PTR "L_good_methods \n"
PTR "L_good_methods" SIGNED_METHOD_LIST "\n"
#endif
PTR "0 \n"
PTR "0 \n"

View File

@ -24,6 +24,9 @@ END
#include <objc/runtime.h>
static int state;
static int swizzleOld;
static int swizzleNew;
static int swizzleB;
#define ONE 1
#define TWO 2
@ -36,6 +39,13 @@ static int state;
+(void) two { state = TWO; }
+(void) length { state = LENGTH; }
+(void) count { state = COUNT; }
-(void) swizzleTarget {
swizzleOld++;
}
-(void) swizzleReplacement {
swizzleNew++;
}
@end
#define checkExchange(s1, v1, s2, v2) \
@ -90,6 +100,42 @@ static int state;
testassert(state == v2); \
} while (0)
@interface A : Super
@end
@implementation A
@end
@interface B : Super
@end
@implementation B
- (void) swizzleTarget {
swizzleB++;
}
@end
@interface C : Super
@end
@implementation C
- (void) hello { }
@end
static IMP findInCache(Class cls, SEL sel)
{
struct objc_imp_cache_entry *ents;
int count;
IMP ret = nil;
ents = class_copyImpCache(cls, &count);
for (int i = 0; i < count; i++) {
if (ents[i].sel == sel) {
ret = ents[i].imp;
break;
}
}
free(ents);
return ret;
}
int main()
{
// Check ordinary selectors
@ -102,5 +148,66 @@ int main()
checkExchange(count, COUNT, one, ONE);
checkExchange(two, TWO, length, LENGTH);
Super *s = [Super new];
A *a = [A new];
B *b = [B new];
C *c = [C new];
// cache swizzleTarget in Super, A and B
[s swizzleTarget];
testassert(swizzleOld == 1);
testassert(swizzleNew == 0);
testassert(swizzleB == 0);
testassert(findInCache([Super class], @selector(swizzleTarget)) != nil);
[a swizzleTarget];
testassert(swizzleOld == 2);
testassert(swizzleNew == 0);
testassert(swizzleB == 0);
testassert(findInCache([A class], @selector(swizzleTarget)) != nil);
[b swizzleTarget];
testassert(swizzleOld == 2);
testassert(swizzleNew == 0);
testassert(swizzleB == 1);
testassert(findInCache([B class], @selector(swizzleTarget)) != nil);
// prime C's cache too
[c hello];
testassert(findInCache([C class], @selector(hello)) != nil);
Method m1 = class_getInstanceMethod([Super class], @selector(swizzleTarget));
Method m2 = class_getInstanceMethod([Super class], @selector(swizzleReplacement));
method_exchangeImplementations(m1, m2);
// this should invalidate Super and A, but:
// - not B, because it overrides -swizzleTarget and hence doesn't care
// - not C, because it called neither -swizzleTarget nor -swizzleReplacement
testassert(findInCache([Super class], @selector(swizzleTarget)) == nil);
testassert(findInCache([A class], @selector(swizzleTarget)) == nil);
testassert(findInCache([B class], @selector(swizzleTarget)) != nil);
testassert(findInCache([C class], @selector(hello)) != nil);
// now check that all lookups do the right thing
[s swizzleTarget];
testassert(swizzleOld == 2);
testassert(swizzleNew == 1);
testassert(swizzleB == 1);
[a swizzleTarget];
testassert(swizzleOld == 2);
testassert(swizzleNew == 2);
testassert(swizzleB == 1);
[b swizzleTarget];
testassert(swizzleOld == 2);
testassert(swizzleNew == 2);
testassert(swizzleB == 2);
[c swizzleTarget];
testassert(swizzleOld == 2);
testassert(swizzleNew == 3);
testassert(swizzleB == 2);
succeed(__FILE__);
}
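// For reference, a minimal sketch of the swizzle exercised above, using only
// public runtime API (swapInstanceMethods is a hypothetical helper name):
//
//   #include <objc/runtime.h>
//
//   static void swapInstanceMethods(Class cls, SEL a, SEL b)
//   {
//       Method ma = class_getInstanceMethod(cls, a);
//       Method mb = class_getInstanceMethod(cls, b);
//       // After the exchange, the runtime must flush any method caches
//       // still holding the old IMPs; the findInCache() asserts above
//       // verify exactly that.
//       method_exchangeImplementations(ma, mb);
//   }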

74
test/fakeRealizedClass.m Normal file
View File

@ -0,0 +1,74 @@
/*
Make sure we detect classes with the RW_REALIZED bit set in the binary. rdar://problem/67692760
TEST_CONFIG OS=macosx
TEST_CRASHES
TEST_RUN_OUTPUT
objc\[\d+\]: realized class 0x[0-9a-fA-F]+ has corrupt data pointer 0x[0-9a-fA-F]+
objc\[\d+\]: HALTED
END
*/
#include "test.h"
#include <objc/NSObject.h>
#define RW_REALIZED (1U<<31)
struct ObjCClass {
struct ObjCClass * __ptrauth_objc_isa_pointer isa;
struct ObjCClass * __ptrauth_objc_super_pointer superclass;
void *cachePtr;
uintptr_t zero;
uintptr_t data;
};
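// (Layout note: this hand-rolled struct mirrors the runtime's class layout --
// isa, superclass, the cache words, and a raw 'data' word standing in for
// class_data_bits_t, which for an on-disk class points at its class_ro_t.)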
struct ObjCClass_ro {
uint32_t flags;
uint32_t instanceStart;
uint32_t instanceSize;
#ifdef __LP64__
uint32_t reserved;
#endif
union {
const uint8_t * ivarLayout;
struct ObjCClass * nonMetaClass;
};
const char * name;
struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList;
struct protocol_list_t * baseProtocols;
const struct ivar_list_t * ivars;
const uint8_t * weakIvarLayout;
struct property_list_t *baseProperties;
};
extern struct ObjCClass OBJC_METACLASS_$_NSObject;
extern struct ObjCClass OBJC_CLASS_$_NSObject;
struct ObjCClass_ro FakeSuperclassRO = {
.flags = RW_REALIZED
};
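// (RW_REALIZED is a class_rw_t flag that should never appear in the on-disk
// ro flags word; planting it here is what trips the runtime's
// corrupt-data-pointer check quoted in TEST_RUN_OUTPUT.)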
struct ObjCClass FakeSuperclass = {
&OBJC_METACLASS_$_NSObject,
&OBJC_METACLASS_$_NSObject,
NULL,
0,
(uintptr_t)&FakeSuperclassRO
};
struct ObjCClass_ro FakeSubclassRO;
struct ObjCClass FakeSubclass = {
&FakeSuperclass,
&FakeSuperclass,
NULL,
0,
(uintptr_t)&FakeSubclassRO
};
static struct ObjCClass *class_ptr __attribute__((used)) __attribute__((section("__DATA,__objc_nlclslist"))) = &FakeSubclass;
int main() {}
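// (The used+section attributes above plant &FakeSubclass in
// __DATA,__objc_nlclslist, the non-lazy class list, so the runtime realizes
// the class at image load and halts before main() ever runs.)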

74
test/fakeRealizedClass2.m Normal file
View File

@ -0,0 +1,74 @@
/*
Variant on fakeRealizedClass that tests a fake class with no superclass. rdar://problem/67692760
TEST_CONFIG OS=macosx
TEST_CRASHES
TEST_RUN_OUTPUT
objc\[\d+\]: realized class 0x[0-9a-fA-F]+ has corrupt data pointer 0x[0-9a-fA-F]+
objc\[\d+\]: HALTED
END
*/
#include "test.h"
#include <objc/NSObject.h>
#define RW_REALIZED (1U<<31)
struct ObjCClass {
struct ObjCClass * __ptrauth_objc_isa_pointer isa;
struct ObjCClass * __ptrauth_objc_super_pointer superclass;
void *cachePtr;
uintptr_t zero;
uintptr_t data;
};
struct ObjCClass_ro {
uint32_t flags;
uint32_t instanceStart;
uint32_t instanceSize;
#ifdef __LP64__
uint32_t reserved;
#endif
union {
const uint8_t * ivarLayout;
struct ObjCClass * nonMetaClass;
};
const char * name;
struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList;
struct protocol_list_t * baseProtocols;
const struct ivar_list_t * ivars;
const uint8_t * weakIvarLayout;
struct property_list_t *baseProperties;
};
extern struct ObjCClass OBJC_METACLASS_$_NSObject;
extern struct ObjCClass OBJC_CLASS_$_NSObject;
struct ObjCClass_ro FakeSuperclassRO = {
.flags = RW_REALIZED
};
struct ObjCClass FakeSuperclass = {
&OBJC_METACLASS_$_NSObject,
NULL,
NULL,
0,
(uintptr_t)&FakeSuperclassRO
};
struct ObjCClass_ro FakeSubclassRO;
struct ObjCClass FakeSubclass = {
&FakeSuperclass,
&FakeSuperclass,
NULL,
0,
(uintptr_t)&FakeSubclassRO
};
static struct ObjCClass *class_ptr __attribute__((used)) __attribute__((section("__DATA,__objc_nlclslist"))) = &FakeSubclass;
int main() {}
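// (Identical to fakeRealizedClass.m except that FakeSuperclass's superclass
// slot is NULL, exercising the root-class path of the same check.)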

View File

@ -1,10 +0,0 @@
#include "test.h"
OBJC_ROOT_CLASS
@interface Main @end
@implementation Main @end
int main(int argc __attribute__((unused)), char **argv)
{
succeed(basename(argv[0]));
}

View File

@ -1 +0,0 @@
int GC(void) { return 42; }

View File

@ -1,8 +0,0 @@
#import <objc/objc-api.h>
OBJC_ROOT_CLASS
@interface GC @end
@implementation GC @end
// silence "no debug symbols in executable" warning
void foo(void) { }

View File

@ -1,12 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/$C{ARCH}-aso gcenforcer-app-aso.exe
END
TEST_RUN_OUTPUT
.*No Info\.plist file in application bundle or no NSPrincipalClass in the Info\.plist file, exiting
END
*/

View File

@ -1,14 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/$C{ARCH}-gc gcenforcer-app-gc.exe
END
TEST_CRASHES
TEST_RUN_OUTPUT
objc\[\d+\]: Objective-C garbage collection is no longer supported\.
objc\[\d+\]: HALTED
END
*/

View File

@ -1,14 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/$C{ARCH}-gcaso gcenforcer-app-gcaso.exe
END
TEST_CRASHES
TEST_RUN_OUTPUT
objc\[\d+\]: Objective-C garbage collection is no longer supported\.
objc\[\d+\]: HALTED
END
*/

View File

@ -1,14 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/$C{ARCH}-gcaso2 gcenforcer-app-gcaso2.exe
END
TEST_CRASHES
TEST_RUN_OUTPUT
objc\[\d+\]: Objective-C garbage collection is no longer supported\.
objc\[\d+\]: HALTED
END
*/

View File

@ -1,14 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/$C{ARCH}-gconly gcenforcer-app-gconly.exe
END
TEST_CRASHES
TEST_RUN_OUTPUT
objc\[\d+\]: Objective-C garbage collection is no longer supported\.
objc\[\d+\]: HALTED
END
*/

View File

@ -1,12 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/$C{ARCH}-nogc gcenforcer-app-nogc.exe
END
TEST_RUN_OUTPUT
running
END
*/

View File

@ -1,12 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/$C{ARCH}-noobjc gcenforcer-app-noobjc.exe
END
TEST_RUN_OUTPUT
END
*/

View File

@ -1,11 +0,0 @@
// gc-off app loading gc-off dylib: should work
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/libnogc.dylib .
$C{COMPILE} $DIR/gc-main.m -x none libnogc.dylib -o gcenforcer-dylib-nogc.exe
END
*/

View File

@ -1,9 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/libnoobjc.dylib .
$C{COMPILE} $DIR/gc-main.m -x none libnoobjc.dylib -o gcenforcer-dylib-noobjc.exe
END
*/

View File

@ -1,22 +0,0 @@
// gc-off app loading gc-required dylib: should crash
// linker sees librequiresgc.fake.dylib, runtime uses librequiresgc.dylib
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_CRASHES
TEST_RUN_OUTPUT
dyld: Library not loaded: librequiresgc\.dylib
Referenced from: .*gcenforcer-dylib-requiresgc.exe
Reason: no suitable image found\. Did find:
(.*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)+
librequiresgc\.dylib: cannot load 'librequiresgc\.dylib' because Objective-C garbage collection is not supported(
.*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)*
END
TEST_BUILD
cp $DIR/gcfiles/librequiresgc.dylib .
$C{COMPILE} $DIR/gc-main.m -x none $DIR/gcfiles/librequiresgc.fake.dylib -o gcenforcer-dylib-requiresgc.exe
END
*/

View File

@ -1,9 +0,0 @@
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/libsupportsgc.dylib .
$C{COMPILE} $DIR/gc-main.m -x none libsupportsgc.dylib -o gcenforcer-dylib-supportsgc.exe
END
*/

View File

@ -1,88 +0,0 @@
#pragma clang diagnostic ignored "-Wcomment"
/*
fixme disabled in BATS because of gcfiles
TEST_CONFIG OS=macosx BATS=0
TEST_BUILD
cp $DIR/gcfiles/* .
$C{COMPILE} $DIR/gcenforcer-preflight.m -o gcenforcer-preflight.exe
END
*/
#include "test.h"
#include <dlfcn.h>
void check(int expected, const char *name)
{
int fd = open(name, O_RDONLY);
testassert(fd >= 0);
int result = objc_appRequiresGC(fd);
close(fd);
testprintf("want %2d got %2d for %s\n", expected, result, name);
if (result != expected) {
fail("want %2d got %2d for %s\n", expected, result, name);
}
testassert(result == expected);
}
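// (Per the expectations below: objc_appRequiresGC reports 0 for images that
// don't require GC, 1 for images that do, and -1 for files it cannot parse;
// for fat files, any GC-requiring slice makes the whole file report 1.)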
int main()
{
int i;
for (i = 0; i < 1000; i++) {
// dlopen_preflight
testassert(dlopen_preflight("libsupportsgc.dylib"));
testassert(dlopen_preflight("libnoobjc.dylib"));
testassert(! dlopen_preflight("librequiresgc.dylib"));
testassert(dlopen_preflight("libnogc.dylib"));
// objc_appRequiresGC
// noobjc: no ObjC content
// nogc: ordinary not GC
// aso: trivial AppleScriptObjC wrapper that can run without GC
// gc: -fobjc-gc
// gconly: -fobjc-gc-only
// gcaso: non-trivial AppleScriptObjC with too many classrefs
// gcaso2: non-trivial AppleScriptObjC with too many class impls
check(0, "x86_64-noobjc");
check(0, "x86_64-nogc");
check(0, "x86_64-aso");
check(1, "x86_64-gc");
check(1, "x86_64-gconly");
check(1, "x86_64-gcaso");
check(1, "x86_64-gcaso2");
check(0, "i386-noobjc");
check(0, "i386-nogc");
check(0, "i386-aso");
check(1, "i386-gc");
check(1, "i386-gconly");
check(1, "i386-gcaso");
check(1, "i386-gcaso2");
// fat files
check(0, "i386-aso--x86_64-aso");
check(0, "i386-nogc--x86_64-nogc");
check(1, "i386-gc--x86_64-gc");
check(1, "i386-gc--x86_64-nogc");
check(1, "i386-nogc--x86_64-gc");
// broken files
check(-1, "x86_64-broken");
check(-1, "i386-broken");
check(-1, "i386-broken--x86_64-gc");
check(-1, "i386-broken--x86_64-nogc");
check(-1, "i386-gc--x86_64-broken");
check(-1, "i386-nogc--x86_64-broken");
// evil files
// evil1: claims to have 4 billion load commands of size 0
check(-1, "evil1");
}
succeed(__FILE__);
}

Some files were not shown because too many files have changed in this diff.