diff --git a/libobjc.order b/libobjc.order index c7415fc..1d56c59 100644 --- a/libobjc.order +++ b/libobjc.order @@ -19,8 +19,6 @@ __objc_update_stubs_in_mach_header _sel_init ___sel_registerName __objc_search_builtins -__ZNK8objc_opt13objc_selopt_t3getEPKc -__ZNK8objc_opt13objc_selopt_t4hashEPKc _sel_registerName _arr_init __ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE4initEj diff --git a/markgc.cpp b/markgc.cpp index 4543ad6..bed92dd 100644 --- a/markgc.cpp +++ b/markgc.cpp @@ -391,6 +391,14 @@ void dosect(uint8_t *start, macho_section

*sect) sect->set_sectname("__objc_init_func"); if (debug) printf("disabled __mod_init_func section\n"); } + if (segnameStartsWith(sect->segname(), "__TEXT") && + sectnameEquals(sect->sectname(), "__init_offsets")) + { + // section type 0 is S_REGULAR + sect->set_flags(sect->flags() & ~SECTION_TYPE); + sect->set_sectname("__objc_init_offs"); + if (debug) printf("disabled __mod_init_func section\n"); + } if (segnameStartsWith(sect->segname(), "__DATA") && sectnameEquals(sect->sectname(), "__mod_term_func")) { diff --git a/objc.sln b/objc.sln old mode 100644 new mode 100755 diff --git a/objc.xcodeproj/project.pbxproj b/objc.xcodeproj/project.pbxproj index 80c47fb..9f3248f 100644 --- a/objc.xcodeproj/project.pbxproj +++ b/objc.xcodeproj/project.pbxproj @@ -7,6 +7,17 @@ objects = { /* Begin PBXAggregateTarget section */ + 6EF877EF23263D7000963DBB /* objc_executables */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 6EF877F223263D7000963DBB /* Build configuration list for PBXAggregateTarget "objc_executables" */; + buildPhases = ( + ); + dependencies = ( + 6EF877F423263D8000963DBB /* PBXTargetDependency */, + ); + name = objc_executables; + productName = "objc-executables"; + }; 834F9B01212E560100F95A54 /* objc4_tests */ = { isa = PBXAggregateTarget; buildConfigurationList = 834F9B04212E560200F95A54 /* Build configuration list for PBXAggregateTarget "objc4_tests" */; @@ -45,6 +56,13 @@ 6EACB842232C97A400CE9176 /* objc-zalloc.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EACB841232C97A400CE9176 /* objc-zalloc.h */; }; 6EACB844232C97B900CE9176 /* objc-zalloc.mm in Sources */ = {isa = PBXBuildFile; fileRef = 6EACB843232C97B900CE9176 /* objc-zalloc.mm */; }; 6ECD0B1F2244999E00910D88 /* llvm-DenseSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 6ECD0B1E2244999E00910D88 /* llvm-DenseSet.h */; }; + 6EF877DA2325D62600963DBB /* objcdt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 6EF877D92325D62600963DBB /* objcdt.mm */; }; + 6EF877DE2325D79000963DBB /* 
objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; }; + 6EF877E02325D92E00963DBB /* CoreSymbolication.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877DF2325D92E00963DBB /* CoreSymbolication.framework */; }; + 6EF877E22325D93200963DBB /* Symbolication.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877E12325D93200963DBB /* Symbolication.framework */; }; + 6EF877E52325FAC400963DBB /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6EF877E42325FAC400963DBB /* Foundation.framework */; }; + 6EF877E82326184000963DBB /* json.mm in Sources */ = {isa = PBXBuildFile; fileRef = 6EF877E72326184000963DBB /* json.mm */; }; + 6EF877EC232635A700963DBB /* objcdt.1 in Install Manpages */ = {isa = PBXBuildFile; fileRef = 6EF877EA232633CC00963DBB /* objcdt.1 */; }; 7213C36321FA7C730090A271 /* NSObject-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 7213C36221FA7C730090A271 /* NSObject-internal.h */; settings = {ATTRIBUTES = (Private, ); }; }; 7593EC58202248E50046AB96 /* objc-object.h in Headers */ = {isa = PBXBuildFile; fileRef = 7593EC57202248DF0046AB96 /* objc-object.h */; }; 75A9504F202BAA0600D7D56F /* objc-locks-new.h in Headers */ = {isa = PBXBuildFile; fileRef = 75A9504E202BAA0300D7D56F /* objc-locks-new.h */; }; @@ -128,12 +146,22 @@ 83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83F550DF155E030800E95D3B /* objc-cache-old.mm */; }; 87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; }; 9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; }; + C22F5208230EF38B001BFE14 /* objc-ptrauth.h in Headers */ = {isa = PBXBuildFile; fileRef = C22F5207230EF38B001BFE14 /* objc-ptrauth.h */; }; C2E6D3FC2225DCF00059DFAA /* DenseMapExtras.h in Headers */ = {isa = 
PBXBuildFile; fileRef = C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */; }; + C2EB731D23D8A38A0040672B /* dummy-library-mac-i386.c in Sources */ = {isa = PBXBuildFile; fileRef = C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */; }; E8923DA5116AB2820071B552 /* objc-block-trampolines.mm in Sources */ = {isa = PBXBuildFile; fileRef = E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */; }; + E934A9F123E996D00088F26F /* objc4.plist in CopyFiles */ = {isa = PBXBuildFile; fileRef = E934A9EF23E9967D0088F26F /* objc4.plist */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; }; F9BCC71B205C68E800DD9AFC /* objc-blocktramps-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 8379996D13CBAF6F007C2B5F /* objc-blocktramps-arm64.s */; }; /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ + 6EF877F323263D8000963DBB /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 6EF877D62325D62600963DBB; + remoteInfo = objcdt; + }; 837F67AC1A771F6E004D34FA /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -150,6 +178,30 @@ }; /* End PBXContainerItemProxy section */ +/* Begin PBXCopyFilesBuildPhase section */ + 6EF877D52325D62600963DBB /* Install Manpages */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/local/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + 6EF877EC232635A700963DBB /* objcdt.1 in Install Manpages */, + ); + name = "Install Manpages"; + runOnlyForDeploymentPostprocessing = 1; + }; + E934A9F023E996CC0088F26F /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 8; + dstPath = /System/Library/FeatureFlags/Domain; + dstSubfolderSpec = 0; + files = ( + E934A9F123E996D00088F26F /* objc4.plist in CopyFiles */, + ); + runOnlyForDeploymentPostprocessing = 1; + }; +/* End PBXCopyFilesBuildPhase 
section */ + /* Begin PBXFileReference section */ 393CEABF0DC69E3E000B69DE /* objc-references.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-references.mm"; path = "runtime/objc-references.mm"; sourceTree = ""; }; 393CEAC50DC69E67000B69DE /* objc-references.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-references.h"; path = "runtime/objc-references.h"; sourceTree = ""; }; @@ -164,6 +216,15 @@ 6EACB841232C97A400CE9176 /* objc-zalloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-zalloc.h"; path = "runtime/objc-zalloc.h"; sourceTree = ""; }; 6EACB843232C97B900CE9176 /* objc-zalloc.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-zalloc.mm"; path = "runtime/objc-zalloc.mm"; sourceTree = ""; }; 6ECD0B1E2244999E00910D88 /* llvm-DenseSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "llvm-DenseSet.h"; path = "runtime/llvm-DenseSet.h"; sourceTree = ""; }; + 6EF877D72325D62600963DBB /* objcdt */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = objcdt; sourceTree = BUILT_PRODUCTS_DIR; }; + 6EF877D92325D62600963DBB /* objcdt.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = objcdt.mm; sourceTree = ""; usesTabs = 0; }; + 6EF877DF2325D92E00963DBB /* CoreSymbolication.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreSymbolication.framework; path = System/Library/PrivateFrameworks/CoreSymbolication.framework; sourceTree = SDKROOT; }; + 6EF877E12325D93200963DBB /* Symbolication.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Symbolication.framework; path = System/Library/PrivateFrameworks/Symbolication.framework; sourceTree = SDKROOT; }; + 
6EF877E32325D95300963DBB /* objcdt-entitlements.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "objcdt-entitlements.plist"; sourceTree = ""; }; + 6EF877E42325FAC400963DBB /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; }; + 6EF877E62326184000963DBB /* json.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = json.h; sourceTree = ""; usesTabs = 1; }; + 6EF877E72326184000963DBB /* json.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = json.mm; sourceTree = ""; usesTabs = 1; }; + 6EF877EA232633CC00963DBB /* objcdt.1 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.man; path = objcdt.1; sourceTree = ""; }; 7213C36221FA7C730090A271 /* NSObject-internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "NSObject-internal.h"; path = "runtime/NSObject-internal.h"; sourceTree = ""; }; 7593EC57202248DF0046AB96 /* objc-object.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-object.h"; path = "runtime/objc-object.h"; sourceTree = ""; }; 75A9504E202BAA0300D7D56F /* objc-locks-new.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-locks-new.h"; path = "runtime/objc-locks-new.h"; sourceTree = ""; }; @@ -252,15 +313,32 @@ 87BB4E900EC39633005D08E1 /* objc-probes.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; name = "objc-probes.d"; path = "runtime/objc-probes.d"; sourceTree = ""; }; 9672F7ED14D5F488007CEC96 /* NSObject.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = NSObject.mm; path = runtime/NSObject.mm; sourceTree = ""; }; BC8B5D1212D3D48100C78A5B /* libauto.dylib */ = 
{isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libauto.dylib; path = /usr/lib/libauto.dylib; sourceTree = ""; }; + C217B55222DE556D004369BA /* objc-env.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-env.h"; path = "runtime/objc-env.h"; sourceTree = ""; }; + C2296C682457336C003FAE61 /* objc-bp-assist.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "objc-bp-assist.h"; path = "runtime/objc-bp-assist.h"; sourceTree = ""; }; + C22F5207230EF38B001BFE14 /* objc-ptrauth.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-ptrauth.h"; path = "runtime/objc-ptrauth.h"; sourceTree = ""; }; C2E6D3FB2225DCF00059DFAA /* DenseMapExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DenseMapExtras.h; path = runtime/DenseMapExtras.h; sourceTree = ""; }; + C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "dummy-library-mac-i386.c"; path = "runtime/dummy-library-mac-i386.c"; sourceTree = ""; }; D2AAC0630554660B00DB518D /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E8923D9C116AB2820071B552 /* objc-blocktramps-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-i386.s"; path = "runtime/objc-blocktramps-i386.s"; sourceTree = ""; }; E8923D9D116AB2820071B552 /* objc-blocktramps-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-blocktramps-x86_64.s"; path = "runtime/objc-blocktramps-x86_64.s"; sourceTree = ""; }; E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = 
"objc-block-trampolines.mm"; path = "runtime/objc-block-trampolines.mm"; sourceTree = ""; }; + E934A9EF23E9967D0088F26F /* objc4.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = objc4.plist; sourceTree = ""; }; + E97047552497CC5300781D29 /* check_preopt_caches.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = check_preopt_caches.entitlements; sourceTree = ""; }; + E9AD465924925261002AF1DB /* check_preopt_caches.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = check_preopt_caches.mm; sourceTree = ""; }; F9BCC727205C68E800DD9AFC /* libobjc-trampolines.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = "libobjc-trampolines.dylib"; sourceTree = BUILT_PRODUCTS_DIR; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ + 6EF877D42325D62600963DBB /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 6EF877E22325D93200963DBB /* Symbolication.framework in Frameworks */, + 6EF877E52325FAC400963DBB /* Foundation.framework in Frameworks */, + 6EF877E02325D92E00963DBB /* CoreSymbolication.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; D289988505E68E00004EDB86 /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -289,6 +367,8 @@ 838486270D6D690F00CEA253 /* Obsolete Source */, 08FB7795FE84155DC02AAC07 /* Source */, 838485B20D6D67F900CEA253 /* Other */, + 6EF877D82325D62600963DBB /* objcdt */, + E9AD465824925261002AF1DB /* check-preopt-caches */, 1AB674ADFE9D54B511CA2CBB /* Products */, F9BCC72A205C6A1600DD9AFC /* Frameworks */, ); @@ -298,6 +378,7 @@ 08FB7795FE84155DC02AAC07 /* Source */ = { isa = PBXGroup; children = ( + C2EB731C23D8A38A0040672B /* dummy-library-mac-i386.c */, 838485B80D6D687300CEA253 /* hashtable2.mm */, 838485BC0D6D687300CEA253 /* maptable.mm */, 
9672F7ED14D5F488007CEC96 /* NSObject.mm */, @@ -352,10 +433,23 @@ children = ( D2AAC0630554660B00DB518D /* libobjc.A.dylib */, F9BCC727205C68E800DD9AFC /* libobjc-trampolines.dylib */, + 6EF877D72325D62600963DBB /* objcdt */, ); name = Products; sourceTree = ""; }; + 6EF877D82325D62600963DBB /* objcdt */ = { + isa = PBXGroup; + children = ( + 6EF877EA232633CC00963DBB /* objcdt.1 */, + 6EF877E62326184000963DBB /* json.h */, + 6EF877E72326184000963DBB /* json.mm */, + 6EF877D92325D62600963DBB /* objcdt.mm */, + 6EF877E32325D95300963DBB /* objcdt-entitlements.plist */, + ); + path = objcdt; + sourceTree = ""; + }; 838485B20D6D67F900CEA253 /* Other */ = { isa = PBXGroup; children = ( @@ -363,6 +457,7 @@ 838485B40D6D683300CEA253 /* APPLE_LICENSE */, 838485B50D6D683300CEA253 /* ReleaseNotes.rtf */, 83CE671D1E6E76B60095A33E /* interposable.txt */, + E934A9EF23E9967D0088F26F /* objc4.plist */, 838485B30D6D682B00CEA253 /* libobjc.order */, ); name = Other; @@ -389,12 +484,13 @@ 838485C70D6D688200CEA253 /* Private Headers */ = { isa = PBXGroup; children = ( - 7213C36221FA7C730090A271 /* NSObject-internal.h */, - 83112ED30F00599600A5FBAF /* objc-internal.h */, - 834EC0A311614167009B2563 /* objc-abi.h */, 838485BB0D6D687300CEA253 /* maptable.h */, - 834266D70E665A8B002E4DA2 /* objc-gdb.h */, + 7213C36221FA7C730090A271 /* NSObject-internal.h */, + 834EC0A311614167009B2563 /* objc-abi.h */, 8306440620D24A3E00E356D2 /* objc-block-trampolines.h */, + 834266D70E665A8B002E4DA2 /* objc-gdb.h */, + 83112ED30F00599600A5FBAF /* objc-internal.h */, + C22F5207230EF38B001BFE14 /* objc-ptrauth.h */, ); name = "Private Headers"; sourceTree = ""; @@ -437,6 +533,8 @@ 83D9269721225A7400299F69 /* arm64-asm.h */, 83D92695212254CF00299F69 /* isa.h */, 838485CF0D6D68A200CEA253 /* objc-config.h */, + C2296C682457336C003FAE61 /* objc-bp-assist.h */, + C217B55222DE556D004369BA /* objc-env.h */, 83BE02E50FCCB24D00661494 /* objc-file-old.h */, 83BE02E60FCCB24D00661494 /* objc-file.h */, 
838485D40D6D68A200CEA253 /* objc-initialize.h */, @@ -457,9 +555,21 @@ name = "Project Headers"; sourceTree = ""; }; + E9AD465824925261002AF1DB /* check-preopt-caches */ = { + isa = PBXGroup; + children = ( + E97047552497CC5300781D29 /* check_preopt_caches.entitlements */, + E9AD465924925261002AF1DB /* check_preopt_caches.mm */, + ); + path = "check-preopt-caches"; + sourceTree = ""; + }; F9BCC72A205C6A1600DD9AFC /* Frameworks */ = { isa = PBXGroup; children = ( + 6EF877E42325FAC400963DBB /* Foundation.framework */, + 6EF877E12325D93200963DBB /* Symbolication.framework */, + 6EF877DF2325D92E00963DBB /* CoreSymbolication.framework */, ); name = Frameworks; sourceTree = ""; @@ -528,6 +638,7 @@ 838486200D6D68A800CEA253 /* runtime.h in Headers */, 39ABD72312F0B61800D1054C /* objc-weak.h in Headers */, 83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */, + C22F5208230EF38B001BFE14 /* objc-ptrauth.h in Headers */, 6ECD0B1F2244999E00910D88 /* llvm-DenseSet.h in Headers */, 83F4B52915E843B100E0926F /* NSObject.h in Headers */, ); @@ -536,6 +647,23 @@ /* End PBXHeadersBuildPhase section */ /* Begin PBXNativeTarget section */ + 6EF877D62325D62600963DBB /* objcdt */ = { + isa = PBXNativeTarget; + buildConfigurationList = 6EF877DD2325D62600963DBB /* Build configuration list for PBXNativeTarget "objcdt" */; + buildPhases = ( + 6EF877D32325D62600963DBB /* Sources */, + 6EF877D42325D62600963DBB /* Frameworks */, + 6EF877D52325D62600963DBB /* Install Manpages */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = objcdt; + productName = objcdt; + productReference = 6EF877D72325D62600963DBB /* objcdt */; + productType = "com.apple.product-type.tool"; + }; D2AAC0620554660B00DB518D /* objc */ = { isa = PBXNativeTarget; buildConfigurationList = 1DEB914A08733D8E0010E9CD /* Build configuration list for PBXNativeTarget "objc" */; @@ -545,6 +673,7 @@ D289988505E68E00004EDB86 /* Frameworks */, 830F2AB60D739AB600392440 /* Run Script (markgc) */, 830F2AFA0D73BC5800392440 /* 
Run Script (symlink) */, + E934A9F023E996CC0088F26F /* CopyFiles */, ); buildRules = ( ); @@ -579,9 +708,15 @@ 08FB7793FE84155DC02AAC07 /* Project object */ = { isa = PBXProject; attributes = { - BuildIndependentTargetsInParallel = NO; LastUpgradeCheck = 0440; TargetAttributes = { + 6EF877D62325D62600963DBB = { + CreatedOnToolsVersion = 11.0; + }; + 6EF877EF23263D7000963DBB = { + CreatedOnToolsVersion = 11.0; + ProvisioningStyle = Automatic; + }; 834F9B01212E560100F95A54 = { CreatedOnToolsVersion = 10.0; DevelopmentTeam = 59GAB85EFG; @@ -610,6 +745,8 @@ 837F67A81A771F63004D34FA /* objc-simulator */, F9BCC6CA205C68E800DD9AFC /* objc-trampolines */, 834F9B01212E560100F95A54 /* objc4_tests */, + 6EF877EF23263D7000963DBB /* objc_executables */, + 6EF877D62325D62600963DBB /* objcdt */, ); }; /* End PBXProject section */ @@ -665,6 +802,16 @@ /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ + 6EF877D32325D62600963DBB /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6EF877E82326184000963DBB /* json.mm in Sources */, + 6EF877DA2325D62600963DBB /* objcdt.mm in Sources */, + 6EF877DE2325D79000963DBB /* objc-probes.d in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; D2AAC0610554660B00DB518D /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -705,6 +852,7 @@ 83B1A8BE0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s in Sources */, 83EB007B121C9EC200B92C16 /* objc-sel-table.s in Sources */, 39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */, + C2EB731D23D8A38A0040672B /* dummy-library-mac-i386.c in Sources */, 83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */, 9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */, 83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */, @@ -729,6 +877,11 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ + 6EF877F423263D8000963DBB /* PBXTargetDependency */ = { + isa = 
PBXTargetDependency; + target = 6EF877D62325D62600963DBB /* objcdt */; + targetProxy = 6EF877F323263D8000963DBB /* PBXContainerItemProxy */; + }; 837F67AD1A771F6E004D34FA /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = D2AAC0620554660B00DB518D /* objc */; @@ -751,6 +904,8 @@ COPY_PHASE_STRIP = NO; DEPLOYMENT_LOCATION = YES; DYLIB_CURRENT_VERSION = 228; + EXCLUDED_SOURCE_FILE_NAMES = "dummy-library-mac-i386.c"; + "EXCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "*"; EXECUTABLE_PREFIX = lib; GCC_CW_ASM_SYNTAX = NO; GCC_OPTIMIZATION_LEVEL = 0; @@ -762,6 +917,7 @@ "$(CONFIGURATION_BUILD_DIR)/usr/local/include/**", /System/Library/Frameworks/System.framework/PrivateHeaders, ); + "INCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "dummy-library-mac-i386.c"; INSTALL_PATH = /usr/lib; IS_ZIPPERED = YES; LLVM_LTO = NO; @@ -787,6 +943,10 @@ "-interposable_list", "-Xlinker", interposable.txt, + "-Xlinker", + "-headerpad", + "-Xlinker", + 0x100, ); "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = ( "-lc++abi", @@ -810,7 +970,9 @@ "-interposable_list", "-Xlinker", interposable.txt, + "-loah", ); + "OTHER_LDFLAGS[sdk=macosx*][arch=i386]" = "-nodefaultlibs"; OTHER_TAPI_FLAGS = "-exclude-public-header $(DSTROOT)/usr/include/objc/ObjectiveC.apinotes -exclude-public-header $(DSTROOT)/usr/include/objc/module.modulemap -Xparser -Wno-deprecated-declarations -Xparser -Wno-unavailable-declarations -Xparser -D_OBJC_PRIVATE_H_=1 -DOBJC_DECLARE_SYMBOLS=1"; PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; PRODUCT_NAME = objc.A; @@ -829,6 +991,8 @@ "COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX"; DEPLOYMENT_LOCATION = YES; DYLIB_CURRENT_VERSION = 228; + EXCLUDED_SOURCE_FILE_NAMES = "dummy-library-mac-i386.c"; + "EXCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "*"; EXECUTABLE_PREFIX = lib; GCC_CW_ASM_SYNTAX = NO; GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO; @@ -839,6 +1003,7 @@ "$(CONFIGURATION_BUILD_DIR)/usr/local/include/**", 
/System/Library/Frameworks/System.framework/PrivateHeaders, ); + "INCLUDED_SOURCE_FILE_NAMES[sdk=macosx*][arch=i386]" = "dummy-library-mac-i386.c"; INSTALL_PATH = /usr/lib; IS_ZIPPERED = YES; ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order"; @@ -863,6 +1028,10 @@ "-interposable_list", "-Xlinker", interposable.txt, + "-Xlinker", + "-headerpad", + "-Xlinker", + 0x100, ); "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = ( "-lc++abi", @@ -886,7 +1055,9 @@ "-interposable_list", "-Xlinker", interposable.txt, + "-loah", ); + "OTHER_LDFLAGS[sdk=macosx*][arch=i386]" = "-nodefaultlibs"; OTHER_TAPI_FLAGS = "-exclude-public-header $(DSTROOT)/usr/include/objc/ObjectiveC.apinotes -exclude-public-header $(DSTROOT)/usr/include/objc/module.modulemap -Xparser -Wno-deprecated-declarations -Xparser -Wno-unavailable-declarations -Xparser -D_OBJC_PRIVATE_H_=1 -DOBJC_DECLARE_SYMBOLS=1"; PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; PRODUCT_NAME = objc.A; @@ -908,6 +1079,7 @@ CLANG_CXX_LIBRARY = "libc++"; CLANG_LINK_OBJC_RUNTIME = NO; CLANG_OBJC_RUNTIME = NO; + CODE_SIGN_IDENTITY = "-"; DEBUG_INFORMATION_FORMAT = dwarf; GCC_ENABLE_CPP_EXCEPTIONS = NO; GCC_ENABLE_CPP_RTTI = NO; @@ -954,6 +1126,7 @@ CLANG_CXX_LIBRARY = "libc++"; CLANG_LINK_OBJC_RUNTIME = NO; CLANG_OBJC_RUNTIME = NO; + CODE_SIGN_IDENTITY = "-"; DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; GCC_ENABLE_CPP_EXCEPTIONS = NO; GCC_ENABLE_CPP_RTTI = NO; @@ -995,6 +1168,59 @@ }; name = Release; }; + 6EF877DB2325D62600963DBB /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_ENTITLEMENTS = "objcdt/objcdt-entitlements.plist"; + CODE_SIGN_IDENTITY = "-"; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "__BUILDING_OBJCDT__=1", + "$(inherited)", + ); + HEADER_SEARCH_PATHS = ( + "$(SRCROOT)/runtime", + /System/Library/Frameworks/System.framework/PrivateHeaders, + ); + PRODUCT_NAME = "$(TARGET_NAME)"; + SYSTEM_FRAMEWORK_SEARCH_PATHS = "$(inherited) 
$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + }; + name = Debug; + }; + 6EF877DC2325D62600963DBB /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_ENTITLEMENTS = "objcdt/objcdt-entitlements.plist"; + CODE_SIGN_IDENTITY = "-"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "__BUILDING_OBJCDT__=1", + "$(inherited)", + ); + HEADER_SEARCH_PATHS = ( + "$(SRCROOT)/runtime", + /System/Library/Frameworks/System.framework/PrivateHeaders, + ); + PRODUCT_NAME = "$(TARGET_NAME)"; + SYSTEM_FRAMEWORK_SEARCH_PATHS = "$(inherited) $(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks"; + }; + name = Release; + }; + 6EF877F023263D7000963DBB /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 6EF877F123263D7000963DBB /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; 834F9B02212E560200F95A54 /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { @@ -1034,6 +1260,7 @@ COPY_HEADERS_UNIFDEF_FLAGS = "-UBUILD_FOR_OSX"; "COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX"; COPY_PHASE_STRIP = NO; + DEPLOYMENT_LOCATION = YES; DYLIB_CURRENT_VERSION = 228; EXECUTABLE_PREFIX = lib; GCC_CW_ASM_SYNTAX = NO; @@ -1055,6 +1282,7 @@ OTHER_LDFLAGS = ( "-Xlinker", "-not_for_dyld_shared_cache", + "-nodefaultlibs", ); PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; PRODUCT_NAME = "$(TARGET_NAME)"; @@ -1070,6 +1298,7 @@ COPY_HEADERS_RUN_UNIFDEF = YES; COPY_HEADERS_UNIFDEF_FLAGS = "-UBUILD_FOR_OSX"; "COPY_HEADERS_UNIFDEF_FLAGS[sdk=macosx*]" = "-DBUILD_FOR_OSX"; + DEPLOYMENT_LOCATION = YES; DYLIB_CURRENT_VERSION = 228; EXECUTABLE_PREFIX = lib; GCC_CW_ASM_SYNTAX = NO; @@ -1090,6 +1319,7 @@ OTHER_LDFLAGS = ( "-Xlinker", "-not_for_dyld_shared_cache", + "-nodefaultlibs", ); PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; PRODUCT_NAME = 
"$(TARGET_NAME)"; @@ -1120,6 +1350,24 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 6EF877DD2325D62600963DBB /* Build configuration list for PBXNativeTarget "objcdt" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6EF877DB2325D62600963DBB /* Debug */, + 6EF877DC2325D62600963DBB /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 6EF877F223263D7000963DBB /* Build configuration list for PBXAggregateTarget "objc_executables" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6EF877F023263D7000963DBB /* Debug */, + 6EF877F123263D7000963DBB /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; 834F9B04212E560200F95A54 /* Build configuration list for PBXAggregateTarget "objc4_tests" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/objc4.plist b/objc4.plist new file mode 100644 index 0000000..157aea8 --- /dev/null +++ b/objc4.plist @@ -0,0 +1,11 @@ + + + + + preoptimizedCaches + + Enabled + + + + diff --git a/objcdt/json.h b/objcdt/json.h new file mode 100644 index 0000000..95dbea2 --- /dev/null +++ b/objcdt/json.h @@ -0,0 +1,82 @@ +/* +* Copyright (c) 2019 Apple Inc. All Rights Reserved. +* +* @APPLE_LICENSE_HEADER_START@ +* +* This file contains Original Code and/or Modifications of Original Code +* as defined in and that are subject to the Apple Public Source License +* Version 2.0 (the 'License'). You may not use this file except in +* compliance with the License. Please obtain a copy of the License at +* http://www.opensource.apple.com/apsl/ and read it before using this +* file. 
+* +* The Original Code and all software distributed under the License are +* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +* Please see the License for the specific language governing rights and +* limitations under the License. +* +* @APPLE_LICENSE_HEADER_END@ +*/ + +#ifndef _OBJC_OBJCDT_JSON_H_ +#define _OBJC_OBJCDT_JSON_H_ + +#include +#include +#include +#include + +namespace json { + +enum context: uint8_t { + root, + array_value, + object_value, + object_key, + done, +}; + +class writer { +private: + FILE *_file; + context _context; + int _depth; + bool _needs_comma; + + void begin_value(int sep = '\0'); + void advance(context old); + void key(const char *key); + +public: + + writer(FILE *f); + ~writer(); + + void object(std::function); + void object(const char *key, std::function); + + void array(std::function); + void array(const char *key, std::function); + + void boolean(bool value); + void boolean(const char *key, bool value); + + void number(uint64_t value); + void number(const char *key, uint64_t value); + + void string(const char *s); + void string(const char *key, const char *s); + + __printflike(2, 3) + void stringf(const char *fmt, ...); + + __printflike(3, 4) + void stringf(const char *key, const char *fmt, ...); +}; + +} + +#endif /* _OBJC_OBJCDT_JSON_H_ */ diff --git a/objcdt/json.mm b/objcdt/json.mm new file mode 100644 index 0000000..7eb7488 --- /dev/null +++ b/objcdt/json.mm @@ -0,0 +1,234 @@ +/* +* Copyright (c) 2019 Apple Inc. All Rights Reserved. +* +* @APPLE_LICENSE_HEADER_START@ +* +* This file contains Original Code and/or Modifications of Original Code +* as defined in and that are subject to the Apple Public Source License +* Version 2.0 (the 'License'). 
You may not use this file except in +* compliance with the License. Please obtain a copy of the License at +* http://www.opensource.apple.com/apsl/ and read it before using this +* file. +* +* The Original Code and all software distributed under the License are +* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +* Please see the License for the specific language governing rights and +* limitations under the License. +* +* @APPLE_LICENSE_HEADER_END@ +*/ + +#include +#include "json.h" + +namespace json { + +static bool +context_is_value(context c) +{ + return c == root || c == array_value || c == object_value; +} + +writer::writer(FILE *f) +: _file(f) +, _context(root) +, _depth(0) +, _needs_comma(false) +{ +} + +writer::~writer() +{ + fputc('\n', _file); + fflush(_file); +} + +void +writer::begin_value(int sep) +{ + if (_needs_comma) { + _needs_comma = false; + if (sep) { + fprintf(_file, ", %c\n", sep); + return; + } + fputs(",\n", _file); + } + if (_context == array_value || _context == object_key) { + fprintf(_file, "%*s", _depth * 2, ""); + } + if (sep) { + fprintf(_file, "%c\n", sep); + } +} + +void +writer::advance(context c) +{ + switch (c) { + case root: + _context = done; + _needs_comma = false; + break; + case array_value: + _context = array_value; + _needs_comma = true; + break; + case object_value: + _context = object_key; + _needs_comma = true; + break; + case object_key: + _context = object_value; + _needs_comma = false; + break; + case done: + assert(false); + break; + } +} + +void +writer::key(const char *key) +{ + assert(_context == object_key); + + begin_value(); + fprintf(_file, "\"%s\": ", key); + advance(_context); +} + +void +writer::object(std::function f) +{ + context old = _context; + 
assert(context_is_value(old)); + + begin_value('{'); + + _depth++; + _context = object_key; + _needs_comma = false; + f(); + + _depth--; + fprintf(_file, "\n%*s}", _depth * 2, ""); + advance(old); +} + +void +writer::object(const char *k, std::function f) +{ + key(k); + object(f); +} + +void +writer::array(std::function f) +{ + context old = _context; + assert(context_is_value(old)); + + begin_value('['); + + _depth++; + _context = array_value; + _needs_comma = false; + f(); + + _depth--; + fprintf(_file, "\n%*s]", _depth * 2, ""); + advance(old); +} + +void +writer::array(const char *k, std::function f) +{ + key(k); + array(f); +} + +void +writer::boolean(bool value) +{ + assert(context_is_value(_context)); + begin_value(); + fputs(value ? "true" : "false", _file); + advance(_context); +} + +void +writer::boolean(const char *k, bool value) +{ + key(k); + boolean(value); +} + +void +writer::number(uint64_t value) +{ + assert(context_is_value(_context)); + begin_value(); + fprintf(_file, "%lld", value); + advance(_context); +} + +void +writer::number(const char *k, uint64_t value) +{ + key(k); + number(value); +} + +void +writer::string(const char *s) +{ + assert(context_is_value(_context)); + begin_value(); + fprintf(_file, "\"%s\"", s); + advance(_context); +} + +void +writer::string(const char *k, const char *s) +{ + key(k); + string(s); +} + +void +writer::stringf(const char *fmt, ...) +{ + va_list ap; + + assert(context_is_value(_context)); + begin_value(); + fputc('"', _file); + va_start(ap, fmt); + vfprintf(_file, fmt, ap); + va_end(ap); + fputc('"', _file); + advance(_context); +} + +void +writer::stringf(const char *k, const char *fmt, ...) 
+{ + va_list ap; + + key(k); + + assert(context_is_value(_context)); + begin_value(); + fputc('"', _file); + va_start(ap, fmt); + vfprintf(_file, fmt, ap); + va_end(ap); + fputc('"', _file); + advance(_context); +} + +} // json diff --git a/objcdt/objcdt-entitlements.plist b/objcdt/objcdt-entitlements.plist new file mode 100644 index 0000000..b7b4e6c --- /dev/null +++ b/objcdt/objcdt-entitlements.plist @@ -0,0 +1,10 @@ + + + + + task_for_pid-allow + + com.apple.system-task-ports + + + diff --git a/objcdt/objcdt.1 b/objcdt/objcdt.1 new file mode 100644 index 0000000..999a155 --- /dev/null +++ b/objcdt/objcdt.1 @@ -0,0 +1,19 @@ +.\" Copyright (c) 2019, Apple Computer, Inc. All rights reserved. +.\" +.Dd September 9, 2019 \" DATE +.Dt objcdt 1 \" Program name and manual section number +.Os "OS X" +.Sh NAME +.Nm objcdt +.Nd Tool to debug objective-C usage in live processes +.Sh SYNOPSIS +.Nm objcdt +.Sh DESCRIPTION +The +.Nm +utility is a small CLI with embedded help that can dump some information about +the Objective-C runtime state in live processes. +.Pp +Help can be obtained using +.Nm +.Ar help diff --git a/objcdt/objcdt.mm b/objcdt/objcdt.mm new file mode 100644 index 0000000..eca5fa2 --- /dev/null +++ b/objcdt/objcdt.mm @@ -0,0 +1,36 @@ +/* +* Copyright (c) 2019 Apple Inc. All Rights Reserved. +* +* @APPLE_LICENSE_HEADER_START@ +* +* This file contains Original Code and/or Modifications of Original Code +* as defined in and that are subject to the Apple Public Source License +* Version 2.0 (the 'License'). You may not use this file except in +* compliance with the License. Please obtain a copy of the License at +* http://www.opensource.apple.com/apsl/ and read it before using this +* file. 
+* +* The Original Code and all software distributed under the License are +* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +* Please see the License for the specific language governing rights and +* limitations under the License. +* +* @APPLE_LICENSE_HEADER_END@ +*/ + +#include "objc-private.h" +#include "objc-ptrauth.h" +#include +#include +#include +#include +#include +#include + +int main(int argc, const char *argv[]) +{ + return EX_UNAVAILABLE; +} diff --git a/objcrt/objcrt.vcproj b/objcrt/objcrt.vcproj old mode 100644 new mode 100755 diff --git a/prebuild.bat b/prebuild.bat old mode 100644 new mode 100755 diff --git a/test/gcfiles/x86_64-gconly b/runtime/.DS_Store similarity index 62% rename from test/gcfiles/x86_64-gconly rename to runtime/.DS_Store index fc907e7..5008ddf 100644 Binary files a/test/gcfiles/x86_64-gconly and b/runtime/.DS_Store differ diff --git a/runtime/Messengers.subproj/objc-msg-arm.S b/runtime/Messengers.subproj/objc-msg-arm.S index b1a9aec..4947209 100644 --- a/runtime/Messengers.subproj/objc-msg-arm.S +++ b/runtime/Messengers.subproj/objc-msg-arm.S @@ -222,6 +222,40 @@ LExit$0: .endmacro +////////////////////////////////////////////////////////////////////// +// +// SAVE_REGS +// +// Create a stack frame and save all argument registers in preparation +// for a function call. +////////////////////////////////////////////////////////////////////// + +.macro SAVE_REGS + + stmfd sp!, {r0-r3,r7,lr} + add r7, sp, #16 + sub sp, #8 // align stack + FP_SAVE + +.endmacro + + +////////////////////////////////////////////////////////////////////// +// +// RESTORE_REGS +// +// Restore all argument registers and pop the stack frame created by +// SAVE_REGS. 
+////////////////////////////////////////////////////////////////////// + +.macro RESTORE_REGS + + FP_RESTORE + add sp, #8 // align stack + ldmfd sp!, {r0-r3,r7,lr} + +.endmacro + ///////////////////////////////////////////////////////////////////// // // CacheLookup NORMAL|STRET @@ -666,10 +700,7 @@ LNilReceiver: .macro MethodTableLookup - stmfd sp!, {r0-r3,r7,lr} - add r7, sp, #16 - sub sp, #8 // align stack - FP_SAVE + SAVE_REGS // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER) .if $0 == NORMAL @@ -680,7 +711,7 @@ LNilReceiver: mov r1, r2 // selector .endif mov r2, r9 // class to search - mov r3, #3 // LOOKUP_INITIALIZE | LOOKUP_INITIALIZE + mov r3, #3 // LOOKUP_INITIALIZE | LOOKUP_RESOLVER blx _lookUpImpOrForward mov r12, r0 // r12 = IMP @@ -690,9 +721,7 @@ LNilReceiver: tst r12, r12 // set ne for stret forwarding .endif - FP_RESTORE - add sp, #8 // align stack - ldmfd sp!, {r0-r3,r7,lr} + RESTORE_REGS .endmacro @@ -819,18 +848,55 @@ LNilReceiver: ENTRY _method_invoke + + // See if this is a small method. + lsls r12, r1, #31 + bne.w L_method_invoke_small + + // We can directly load the IMP from big methods. // r1 is method triplet instead of SEL ldr r12, [r1, #METHOD_IMP] ldr r1, [r1, #METHOD_NAME] bx r12 + +L_method_invoke_small: + // Small methods require a call to handle swizzling. + SAVE_REGS + mov r0, r1 + bl __method_getImplementationAndName + mov r12, r0 + mov r9, r1 + RESTORE_REGS + mov r1, r9 + bx r12 + + END_ENTRY _method_invoke ENTRY _method_invoke_stret + + // See if this is a small method. + lsls r12, r2, #31 + bne.w L_method_invoke_stret_small + + // We can directly load the IMP from big methods. // r2 is method triplet instead of SEL ldr r12, [r2, #METHOD_IMP] ldr r2, [r2, #METHOD_NAME] bx r12 + +L_method_invoke_stret_small: + // Small methods require a call to handle swizzling. 
+ SAVE_REGS + mov r0, r2 + bl __method_getImplementationAndName + mov r12, r0 + mov r9, r1 + RESTORE_REGS + mov r2, r9 + bx r12 + END_ENTRY _method_invoke_stret diff --git a/runtime/Messengers.subproj/objc-msg-arm64.S b/runtime/Messengers.subproj/objc-msg-arm64.S old mode 100644 new mode 100755 index 6bf3f29..7794ad5 --- a/runtime/Messengers.subproj/objc-msg-arm64.S +++ b/runtime/Messengers.subproj/objc-msg-arm64.S @@ -30,8 +30,19 @@ #include #include "isa.h" -#include "arm64-asm.h" #include "objc-config.h" +#include "arm64-asm.h" + +#if TARGET_OS_IPHONE && __LP64__ + .section __TEXT,__objc_methname,cstring_literals +l_MagicSelector: /* the shared cache builder knows about this value */ + .byte 0xf0, 0x9f, 0xa4, 0xaf, 0 + + .section __DATA,__objc_selrefs,literal_pointers,no_dead_strip + .p2align 3 +_MagicSelRef: + .quad l_MagicSelector +#endif .data @@ -57,7 +68,6 @@ _objc_restartableRanges: RestartableEntry _cache_getImp RestartableEntry _objc_msgSend - RestartableEntry _objc_msgSendSuper RestartableEntry _objc_msgSendSuper2 RestartableEntry _objc_msgLookup RestartableEntry _objc_msgLookupSuper2 @@ -81,13 +91,13 @@ _objc_restartableRanges: /******************************************************************** - * GetClassFromIsa_p16 src + * GetClassFromIsa_p16 src, needs_auth, auth_address * src is a raw isa field. Sets p16 to the corresponding class pointer. * The raw isa might be an indexed isa to be decoded, or a * packed isa that needs to be masked. 
* * On exit: - * $0 is unchanged + * src is unchanged * p16 is a class pointer * x10 is clobbered ********************************************************************/ @@ -99,11 +109,11 @@ _objc_indexed_classes: .fill ISA_INDEX_COUNT, PTRSIZE, 0 #endif -.macro GetClassFromIsa_p16 /* src */ +.macro GetClassFromIsa_p16 src, needs_auth, auth_address /* note: auth_address is not required if !needs_auth */ #if SUPPORT_INDEXED_ISA // Indexed isa - mov p16, $0 // optimistically set dst = src + mov p16, \src // optimistically set dst = src tbz p16, #ISA_INDEX_IS_NPI_BIT, 1f // done if not non-pointer isa // isa in p16 is indexed adrp x10, _objc_indexed_classes@PAGE @@ -113,12 +123,15 @@ _objc_indexed_classes: 1: #elif __LP64__ +.if \needs_auth == 0 // _cache_getImp takes an authed class already + mov p16, \src +.else // 64-bit packed isa - and p16, $0, #ISA_MASK - + ExtractISA p16, \src, \auth_address +.endif #else // 32-bit raw isa - mov p16, $0 + mov p16, \src #endif @@ -169,9 +182,85 @@ LExit$0: #define FrameWithNoSaves 0x04000000 // frame, no non-volatile saves +#define MSGSEND 100 +#define METHOD_INVOKE 101 + +////////////////////////////////////////////////////////////////////// +// +// SAVE_REGS +// +// Create a stack frame and save all argument registers in preparation +// for a function call. +////////////////////////////////////////////////////////////////////// + +.macro SAVE_REGS kind + + // push frame + SignLR + stp fp, lr, [sp, #-16]! + mov fp, sp + + // save parameter registers: x0..x8, q0..q7 + sub sp, sp, #(10*8 + 8*16) + stp q0, q1, [sp, #(0*16)] + stp q2, q3, [sp, #(2*16)] + stp q4, q5, [sp, #(4*16)] + stp q6, q7, [sp, #(6*16)] + stp x0, x1, [sp, #(8*16+0*8)] + stp x2, x3, [sp, #(8*16+2*8)] + stp x4, x5, [sp, #(8*16+4*8)] + stp x6, x7, [sp, #(8*16+6*8)] +.if \kind == MSGSEND + stp x8, x15, [sp, #(8*16+8*8)] + mov x16, x15 // stashed by CacheLookup, restore to x16 +.elseif \kind == METHOD_INVOKE + str x8, [sp, #(8*16+8*8)] +.else +.abort Unknown kind. 
+.endif + +.endmacro + + +////////////////////////////////////////////////////////////////////// +// +// RESTORE_REGS +// +// Restore all argument registers and pop the stack frame created by +// SAVE_REGS. +////////////////////////////////////////////////////////////////////// + +.macro RESTORE_REGS kind + + ldp q0, q1, [sp, #(0*16)] + ldp q2, q3, [sp, #(2*16)] + ldp q4, q5, [sp, #(4*16)] + ldp q6, q7, [sp, #(6*16)] + ldp x0, x1, [sp, #(8*16+0*8)] + ldp x2, x3, [sp, #(8*16+2*8)] + ldp x4, x5, [sp, #(8*16+4*8)] + ldp x6, x7, [sp, #(8*16+6*8)] +.if \kind == MSGSEND + ldp x8, x16, [sp, #(8*16+8*8)] + orr x16, x16, #2 // for the sake of instrumentations, remember it was the slowpath +.elseif \kind == METHOD_INVOKE + ldr x8, [sp, #(8*16+8*8)] +.else +.abort Unknown kind. +.endif + + mov sp, fp + ldp fp, lr, [sp], #16 + AuthenticateLR + +.endmacro + + /******************************************************************** * - * CacheLookup NORMAL|GETIMP|LOOKUP + * CacheLookup NORMAL|GETIMP|LOOKUP MissLabelDynamic MissLabelConstant + * + * MissLabelConstant is only used for the GETIMP variant. * * Locate the implementation for a selector in a class method cache. * @@ -185,11 +274,27 @@ LExit$0: * x16 = class to be searched * * Kills: - * x9,x10,x11,x12, x17 + * x9,x10,x11,x12,x13,x15,x17 + * + * Untouched: + * x14 * * On exit: (found) calls or returns IMP * with x16 = class, x17 = IMP + * In LOOKUP mode, the two low bits are set to 0x3 + * if we hit a constant cache (used in objc_trace) * (not found) jumps to LCacheMiss + * with x15 = class + * For constant caches in LOOKUP mode, the low bit + * of x16 is set to 0x1 to indicate we had to fallback. + * In addition, when LCacheMiss is __objc_msgSend_uncached or + * __objc_msgLookup_uncached, 0x2 will be set in x16 + * to remember we took the slowpath. 
+ * So the two low bits of x16 on exit mean: + * 0: dynamic hit + * 1: fallback to the parent class, when there is a preoptimized cache + * 2: slowpath + * 3: preoptimized cache hit * ********************************************************************/ @@ -197,60 +302,37 @@ LExit$0: #define GETIMP 1 #define LOOKUP 2 -// CacheHit: x17 = cached IMP, x12 = address of cached IMP, x1 = SEL, x16 = isa +// CacheHit: x17 = cached IMP, x10 = address of buckets, x1 = SEL, x16 = isa .macro CacheHit .if $0 == NORMAL - TailCallCachedImp x17, x12, x1, x16 // authenticate and call imp + TailCallCachedImp x17, x10, x1, x16 // authenticate and call imp .elseif $0 == GETIMP mov p0, p17 cbz p0, 9f // don't ptrauth a nil imp - AuthAndResignAsIMP x0, x12, x1, x16 // authenticate imp and re-sign as IMP + AuthAndResignAsIMP x0, x10, x1, x16 // authenticate imp and re-sign as IMP 9: ret // return IMP .elseif $0 == LOOKUP // No nil check for ptrauth: the caller would crash anyway when they // jump to a nil IMP. We don't care if that jump also fails ptrauth. 
- AuthAndResignAsIMP x17, x12, x1, x16 // authenticate imp and re-sign as IMP + AuthAndResignAsIMP x17, x10, x1, x16 // authenticate imp and re-sign as IMP + cmp x16, x15 + cinc x16, x16, ne // x16 += 1 when x15 != x16 (for instrumentation ; fallback to the parent class) ret // return imp via x17 .else .abort oops .endif .endmacro -.macro CheckMiss - // miss if bucket->sel == 0 -.if $0 == GETIMP - cbz p9, LGetImpMiss -.elseif $0 == NORMAL - cbz p9, __objc_msgSend_uncached -.elseif $0 == LOOKUP - cbz p9, __objc_msgLookup_uncached -.else -.abort oops -.endif -.endmacro - -.macro JumpMiss -.if $0 == GETIMP - b LGetImpMiss -.elseif $0 == NORMAL - b __objc_msgSend_uncached -.elseif $0 == LOOKUP - b __objc_msgLookup_uncached -.else -.abort oops -.endif -.endmacro - -.macro CacheLookup +.macro CacheLookup Mode, Function, MissLabelDynamic, MissLabelConstant // // Restart protocol: // - // As soon as we're past the LLookupStart$1 label we may have loaded - // an invalid cache pointer or mask. + // As soon as we're past the LLookupStart\Function label we may have + // loaded an invalid cache pointer or mask. 
// // When task_restartable_ranges_synchronize() is called, - // (or when a signal hits us) before we're past LLookupEnd$1, - // then our PC will be reset to LLookupRecover$1 which forcefully + // (or when a signal hits us) before we're past LLookupEnd\Function, + // then our PC will be reset to LLookupRecover\Function which forcefully // jumps to the cache-miss codepath which have the following // requirements: // @@ -263,70 +345,158 @@ LExit$0: // - x16 contains the isa // - other registers are set as per calling conventions // -LLookupStart$1: + mov x15, x16 // stash the original isa +LLookupStart\Function: // p1 = SEL, p16 = isa - ldr p11, [x16, #CACHE] // p11 = mask|buckets - -#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 +#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS + ldr p10, [x16, #CACHE] // p10 = mask|buckets + lsr p11, p10, #48 // p11 = mask + and p10, p10, #0xffffffffffff // p10 = buckets + and w12, w1, w11 // x12 = _cmd & mask +#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 + ldr p11, [x16, #CACHE] // p11 = mask|buckets +#if CONFIG_USE_PREOPT_CACHES +#if __has_feature(ptrauth_calls) + tbnz p11, #0, LLookupPreopt\Function + and p10, p11, #0x0000ffffffffffff // p10 = buckets +#else + and p10, p11, #0x0000fffffffffffe // p10 = buckets + tbnz p11, #0, LLookupPreopt\Function +#endif + eor p12, p1, p1, LSR #7 + and p12, p12, p11, LSR #48 // x12 = (_cmd ^ (_cmd >> 7)) & mask +#else and p10, p11, #0x0000ffffffffffff // p10 = buckets and p12, p1, p11, LSR #48 // x12 = _cmd & mask +#endif // CONFIG_USE_PREOPT_CACHES #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4 + ldr p11, [x16, #CACHE] // p11 = mask|buckets and p10, p11, #~0xf // p10 = buckets and p11, p11, #0xf // p11 = maskShift mov p12, #0xffff - lsr p11, p12, p11 // p11 = mask = 0xffff >> p11 - and p12, p1, p11 // x12 = _cmd & mask + lsr p11, p12, p11 // p11 = mask = 0xffff >> p11 + and p12, p1, p11 // x12 = _cmd & mask #else #error Unsupported cache mask storage for 
ARM64. #endif + add p13, p10, p12, LSL #(1+PTRSHIFT) + // p13 = buckets + ((_cmd & mask) << (1+PTRSHIFT)) - add p12, p10, p12, LSL #(1+PTRSHIFT) - // p12 = buckets + ((_cmd & mask) << (1+PTRSHIFT)) + // do { +1: ldp p17, p9, [x13], #-BUCKET_SIZE // {imp, sel} = *bucket-- + cmp p9, p1 // if (sel != _cmd) { + b.ne 3f // scan more + // } else { +2: CacheHit \Mode // hit: call or return imp + // } +3: cbz p9, \MissLabelDynamic // if (sel == 0) goto Miss; + cmp p13, p10 // } while (bucket >= buckets) + b.hs 1b - ldp p17, p9, [x12] // {imp, sel} = *bucket -1: cmp p9, p1 // if (bucket->sel != _cmd) - b.ne 2f // scan more - CacheHit $0 // call or return imp - -2: // not hit: p12 = not-hit bucket - CheckMiss $0 // miss if bucket->sel == 0 - cmp p12, p10 // wrap if bucket == buckets - b.eq 3f - ldp p17, p9, [x12, #-BUCKET_SIZE]! // {imp, sel} = *--bucket - b 1b // loop + // wrap-around: + // p10 = first bucket + // p11 = mask (and maybe other bits on LP64) + // p12 = _cmd & mask + // + // A full cache can happen with CACHE_ALLOW_FULL_UTILIZATION. + // So stop when we circle back to the first probed bucket + // rather than when hitting the first bucket again. + // + // Note that we might probe the initial bucket twice + // when the first probed slot is the last entry. 
-3: // wrap: p12 = first bucket, w11 = mask -#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 - add p12, p12, p11, LSR #(48 - (1+PTRSHIFT)) - // p12 = buckets + (mask << 1+PTRSHIFT) + +#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS + add p13, p10, w11, UXTW #(1+PTRSHIFT) + // p13 = buckets + (mask << 1+PTRSHIFT) +#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 + add p13, p10, p11, LSR #(48 - (1+PTRSHIFT)) + // p13 = buckets + (mask << 1+PTRSHIFT) + // see comment about maskZeroBits #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4 - add p12, p12, p11, LSL #(1+PTRSHIFT) - // p12 = buckets + (mask << 1+PTRSHIFT) + add p13, p10, p11, LSL #(1+PTRSHIFT) + // p13 = buckets + (mask << 1+PTRSHIFT) #else #error Unsupported cache mask storage for ARM64. #endif + add p12, p10, p12, LSL #(1+PTRSHIFT) + // p12 = first probed bucket - // Clone scanning loop to miss instead of hang when cache is corrupt. - // The slow path may detect any corruption and halt later. + // do { +4: ldp p17, p9, [x13], #-BUCKET_SIZE // {imp, sel} = *bucket-- + cmp p9, p1 // if (sel == _cmd) + b.eq 2b // goto hit + cmp p9, #0 // } while (sel != 0 && + ccmp p13, p12, #0, ne // bucket > first_probed) + b.hi 4b - ldp p17, p9, [x12] // {imp, sel} = *bucket -1: cmp p9, p1 // if (bucket->sel != _cmd) - b.ne 2f // scan more - CacheHit $0 // call or return imp - -2: // not hit: p12 = not-hit bucket - CheckMiss $0 // miss if bucket->sel == 0 - cmp p12, p10 // wrap if bucket == buckets - b.eq 3f - ldp p17, p9, [x12, #-BUCKET_SIZE]! 
// {imp, sel} = *--bucket - b 1b // loop +LLookupEnd\Function: +LLookupRecover\Function: + b \MissLabelDynamic -LLookupEnd$1: -LLookupRecover$1: -3: // double wrap - JumpMiss $0 +#if CONFIG_USE_PREOPT_CACHES +#if CACHE_MASK_STORAGE != CACHE_MASK_STORAGE_HIGH_16 +#error config unsupported +#endif +LLookupPreopt\Function: +#if __has_feature(ptrauth_calls) + and p10, p11, #0x007ffffffffffffe // p10 = buckets + autdb x10, x16 // auth as early as possible +#endif + + // x12 = (_cmd - first_shared_cache_sel) + adrp x9, _MagicSelRef@PAGE + ldr p9, [x9, _MagicSelRef@PAGEOFF] + sub p12, p1, p9 + + // w9 = ((_cmd - first_shared_cache_sel) >> hash_shift & hash_mask) +#if __has_feature(ptrauth_calls) + // bits 63..60 of x11 are the number of bits in hash_mask + // bits 59..55 of x11 is hash_shift + + lsr x17, x11, #55 // w17 = (hash_shift, ...) + lsr w9, w12, w17 // >>= shift + + lsr x17, x11, #60 // w17 = mask_bits + mov x11, #0x7fff + lsr x11, x11, x17 // p11 = mask (0x7fff >> mask_bits) + and x9, x9, x11 // &= mask +#else + // bits 63..53 of x11 is hash_mask + // bits 52..48 of x11 is hash_shift + lsr x17, x11, #48 // w17 = (hash_shift, hash_mask) + lsr w9, w12, w17 // >>= shift + and x9, x9, x11, LSR #53 // &= mask +#endif + + ldr x17, [x10, x9, LSL #3] // x17 == sel_offs | (imp_offs << 32) + cmp x12, w17, uxtw + +.if \Mode == GETIMP + b.ne \MissLabelConstant // cache miss + sub x0, x16, x17, LSR #32 // imp = isa - imp_offs + SignAsImp x0 + ret +.else + b.ne 5f // cache miss + sub x17, x16, x17, LSR #32 // imp = isa - imp_offs +.if \Mode == NORMAL + br x17 +.elseif \Mode == LOOKUP + orr x16, x16, #3 // for instrumentation, note that we hit a constant cache + SignAsImp x17 + ret +.else +.abort unhandled mode \Mode +.endif + +5: ldursw x9, [x10, #-8] // offset -8 is the fallback offset + add x16, x16, x9 // compute the fallback isa + b LLookupStart\Function // lookup again with a new isa +.endif +#endif // CONFIG_USE_PREOPT_CACHES .endmacro @@ -345,12 +515,37 @@ 
LLookupRecover$1: #if SUPPORT_TAGGED_POINTERS .data .align 3 - .globl _objc_debug_taggedpointer_classes -_objc_debug_taggedpointer_classes: - .fill 16, 8, 0 .globl _objc_debug_taggedpointer_ext_classes _objc_debug_taggedpointer_ext_classes: .fill 256, 8, 0 + +// Dispatch for split tagged pointers take advantage of the fact that +// the extended tag classes array immediately precedes the standard +// tag array. The .alt_entry directive ensures that the two stay +// together. This is harmless when using non-split tagged pointers. + .globl _objc_debug_taggedpointer_classes + .alt_entry _objc_debug_taggedpointer_classes +_objc_debug_taggedpointer_classes: + .fill 16, 8, 0 + +// Look up the class for a tagged pointer in x0, placing it in x16. +.macro GetTaggedClass + + and x10, x0, #0x7 // x10 = small tag + asr x11, x0, #55 // x11 = large tag with 1s filling the top (because bit 63 is 1 on a tagged pointer) + cmp x10, #7 // tag == 7? + csel x12, x11, x10, eq // x12 = index in tagged pointer classes array, negative for extended tags. + // The extended tag array is placed immediately before the basic tag array + // so this looks into the right place either way. The sign extension done + // by the asr instruction produces the value extended_tag - 256, which produces + // the correct index in the extended tagged pointer classes array. 
+ + // x16 = _objc_debug_taggedpointer_classes[x12] + adrp x10, _objc_debug_taggedpointer_classes@PAGE + add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF + ldr x16, [x10, x12, LSL #3] + +.endmacro #endif ENTRY _objc_msgSend @@ -363,30 +558,15 @@ _objc_debug_taggedpointer_ext_classes: b.eq LReturnZero #endif ldr p13, [x0] // p13 = isa - GetClassFromIsa_p16 p13 // p16 = class + GetClassFromIsa_p16 p13, 1, x0 // p16 = class LGetIsaDone: // calls imp or objc_msgSend_uncached - CacheLookup NORMAL, _objc_msgSend + CacheLookup NORMAL, _objc_msgSend, __objc_msgSend_uncached #if SUPPORT_TAGGED_POINTERS LNilOrTagged: b.eq LReturnZero // nil check - - // tagged - adrp x10, _objc_debug_taggedpointer_classes@PAGE - add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF - ubfx x11, x0, #60, #4 - ldr x16, [x10, x11, LSL #3] - adrp x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE - add x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF - cmp x10, x16 - b.ne LGetIsaDone - - // ext tagged - adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE - add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF - ubfx x11, x0, #52, #8 - ldr x16, [x10, x11, LSL #3] + GetTaggedClass b LGetIsaDone // SUPPORT_TAGGED_POINTERS #endif @@ -412,37 +592,22 @@ LReturnZero: b.eq LLookup_Nil #endif ldr p13, [x0] // p13 = isa - GetClassFromIsa_p16 p13 // p16 = class + GetClassFromIsa_p16 p13, 1, x0 // p16 = class LLookup_GetIsaDone: // returns imp - CacheLookup LOOKUP, _objc_msgLookup + CacheLookup LOOKUP, _objc_msgLookup, __objc_msgLookup_uncached #if SUPPORT_TAGGED_POINTERS LLookup_NilOrTagged: b.eq LLookup_Nil // nil check - - // tagged - adrp x10, _objc_debug_taggedpointer_classes@PAGE - add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF - ubfx x11, x0, #60, #4 - ldr x16, [x10, x11, LSL #3] - adrp x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGE - add x10, x10, _OBJC_CLASS_$___NSUnrecognizedTaggedPointer@PAGEOFF - cmp x10, x16 - b.ne LLookup_GetIsaDone - 
-LLookup_ExtTag: - adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE - add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF - ubfx x11, x0, #52, #8 - ldr x16, [x10, x11, LSL #3] + GetTaggedClass b LLookup_GetIsaDone // SUPPORT_TAGGED_POINTERS #endif LLookup_Nil: - adrp x17, __objc_msgNil@PAGE - add x17, x17, __objc_msgNil@PAGEOFF + adr x17, __objc_msgNil + SignAsImp x17 ret END_ENTRY _objc_msgLookup @@ -465,8 +630,7 @@ LLookup_Nil: UNWIND _objc_msgSendSuper, NoFrame ldp p0, p16, [x0] // p0 = real receiver, p16 = class - // calls imp or objc_msgSend_uncached - CacheLookup NORMAL, _objc_msgSendSuper + b L_objc_msgSendSuper2_body END_ENTRY _objc_msgSendSuper @@ -475,9 +639,18 @@ LLookup_Nil: ENTRY _objc_msgSendSuper2 UNWIND _objc_msgSendSuper2, NoFrame +#if __has_feature(ptrauth_calls) + ldp x0, x17, [x0] // x0 = real receiver, x17 = class + add x17, x17, #SUPERCLASS // x17 = &class->superclass + ldr x16, [x17] // x16 = class->superclass + AuthISASuper x16, x17, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS +LMsgSendSuperResume: +#else ldp p0, p16, [x0] // p0 = real receiver, p16 = class ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass - CacheLookup NORMAL, _objc_msgSendSuper2 +#endif +L_objc_msgSendSuper2_body: + CacheLookup NORMAL, _objc_msgSendSuper2, __objc_msgSend_uncached END_ENTRY _objc_msgSendSuper2 @@ -485,31 +658,24 @@ LLookup_Nil: ENTRY _objc_msgLookupSuper2 UNWIND _objc_msgLookupSuper2, NoFrame +#if __has_feature(ptrauth_calls) + ldp x0, x17, [x0] // x0 = real receiver, x17 = class + add x17, x17, #SUPERCLASS // x17 = &class->superclass + ldr x16, [x17] // x16 = class->superclass + AuthISASuper x16, x17, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS +LMsgLookupSuperResume: +#else ldp p0, p16, [x0] // p0 = real receiver, p16 = class ldr p16, [x16, #SUPERCLASS] // p16 = class->superclass - CacheLookup LOOKUP, _objc_msgLookupSuper2 +#endif + CacheLookup LOOKUP, _objc_msgLookupSuper2, __objc_msgLookup_uncached END_ENTRY _objc_msgLookupSuper2 .macro 
MethodTableLookup - // push frame - SignLR - stp fp, lr, [sp, #-16]! - mov fp, sp - - // save parameter registers: x0..x8, q0..q7 - sub sp, sp, #(10*8 + 8*16) - stp q0, q1, [sp, #(0*16)] - stp q2, q3, [sp, #(2*16)] - stp q4, q5, [sp, #(4*16)] - stp q6, q7, [sp, #(6*16)] - stp x0, x1, [sp, #(8*16+0*8)] - stp x2, x3, [sp, #(8*16+2*8)] - stp x4, x5, [sp, #(8*16+4*8)] - stp x6, x7, [sp, #(8*16+6*8)] - str x8, [sp, #(8*16+8*8)] + SAVE_REGS MSGSEND // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER) // receiver and selector already in x0 and x1 @@ -519,21 +685,8 @@ LLookup_Nil: // IMP in x0 mov x17, x0 - - // restore registers and return - ldp q0, q1, [sp, #(0*16)] - ldp q2, q3, [sp, #(2*16)] - ldp q4, q5, [sp, #(4*16)] - ldp q6, q7, [sp, #(6*16)] - ldp x0, x1, [sp, #(8*16+0*8)] - ldp x2, x3, [sp, #(8*16+2*8)] - ldp x4, x5, [sp, #(8*16+4*8)] - ldp x6, x7, [sp, #(8*16+6*8)] - ldr x8, [sp, #(8*16+8*8)] - mov sp, fp - ldp fp, lr, [sp], #16 - AuthenticateLR + RESTORE_REGS MSGSEND .endmacro @@ -541,7 +694,7 @@ LLookup_Nil: UNWIND __objc_msgSend_uncached, FrameWithNoSaves // THIS IS NOT A CALLABLE C FUNCTION - // Out-of-band p16 is the class to search + // Out-of-band p15 is the class to search MethodTableLookup TailCallFunctionPointer x17 @@ -553,7 +706,7 @@ LLookup_Nil: UNWIND __objc_msgLookup_uncached, FrameWithNoSaves // THIS IS NOT A CALLABLE C FUNCTION - // Out-of-band p16 is the class to search + // Out-of-band p15 is the class to search MethodTableLookup ret @@ -563,13 +716,17 @@ LLookup_Nil: STATIC_ENTRY _cache_getImp - GetClassFromIsa_p16 p0 - CacheLookup GETIMP, _cache_getImp + GetClassFromIsa_p16 p0, 0 + CacheLookup GETIMP, _cache_getImp, LGetImpMissDynamic, LGetImpMissConstant -LGetImpMiss: +LGetImpMissDynamic: mov p0, #0 ret +LGetImpMissConstant: + mov p0, p2 + ret + END_ENTRY _cache_getImp @@ -615,11 +772,37 @@ LGetImpMiss: ENTRY _method_invoke + + // See if this is a small method. 
+ tbnz p1, #0, L_method_invoke_small + + // We can directly load the IMP from big methods. // x1 is method triplet instead of SEL add p16, p1, #METHOD_IMP ldr p17, [x16] ldr p1, [x1, #METHOD_NAME] TailCallMethodListImp x17, x16 + +L_method_invoke_small: + // Small methods require a call to handle swizzling. + SAVE_REGS METHOD_INVOKE + mov p0, p1 + bl __method_getImplementationAndName + // ARM64_32 packs both return values into x0, with SEL in the high bits and IMP in the low. + // ARM64 just returns them in x0 and x1. + mov x17, x0 +#if __LP64__ + mov x16, x1 +#endif + RESTORE_REGS METHOD_INVOKE +#if __LP64__ + mov x1, x16 +#else + lsr x1, x17, #32 + mov w17, w17 +#endif + TailCallFunctionPointer x17 + END_ENTRY _method_invoke #endif diff --git a/runtime/Messengers.subproj/objc-msg-simulator-i386.S b/runtime/Messengers.subproj/objc-msg-simulator-i386.S index 727b983..914a9ac 100644 --- a/runtime/Messengers.subproj/objc-msg-simulator-i386.S +++ b/runtime/Messengers.subproj/objc-msg-simulator-i386.S @@ -192,6 +192,47 @@ LExit$0: #define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves +////////////////////////////////////////////////////////////////////// +// +// SAVE_REGS +// +// Create a stack frame and save all argument registers in preparation +// for a function call. +////////////////////////////////////////////////////////////////////// + +.macro SAVE_REGS + + pushl %ebp + movl %esp, %ebp + + subl $$(8+5*16), %esp + + movdqa %xmm3, 4*16(%esp) + movdqa %xmm2, 3*16(%esp) + movdqa %xmm1, 2*16(%esp) + movdqa %xmm0, 1*16(%esp) + +.endmacro + + +////////////////////////////////////////////////////////////////////// +// +// RESTORE_REGS +// +// Restore all argument registers and pop the stack frame created by +// SAVE_REGS. 
+////////////////////////////////////////////////////////////////////// + +.macro RESTORE_REGS + + movdqa 4*16(%esp), %xmm3 + movdqa 3*16(%esp), %xmm2 + movdqa 2*16(%esp), %xmm1 + movdqa 1*16(%esp), %xmm0 + + leave + +.endmacro ///////////////////////////////////////////////////////////////////// // // CacheLookup return-type, caller @@ -314,10 +355,7 @@ LExit$0: ///////////////////////////////////////////////////////////////////// .macro MethodTableLookup - pushl %ebp - movl %esp, %ebp - - subl $$(8+5*16), %esp + SAVE_REGS .if $0 == NORMAL movl self+4(%ebp), %eax @@ -327,11 +365,6 @@ LExit$0: movl selector_stret+4(%ebp), %ecx .endif - movdqa %xmm3, 4*16(%esp) - movdqa %xmm2, 3*16(%esp) - movdqa %xmm1, 2*16(%esp) - movdqa %xmm0, 1*16(%esp) - // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER) movl $$3, 12(%esp) // LOOKUP_INITIALIZE | LOOKUP_RESOLVER movl %edx, 8(%esp) // class @@ -341,18 +374,13 @@ LExit$0: // imp in eax - movdqa 4*16(%esp), %xmm3 - movdqa 3*16(%esp), %xmm2 - movdqa 2*16(%esp), %xmm1 - movdqa 1*16(%esp), %xmm0 - .if $0 == NORMAL test %eax, %eax // set ne for stret forwarding .else cmp %eax, %eax // set eq for nonstret forwarding .endif - leave + RESTORE_REGS .endmacro @@ -906,23 +934,55 @@ L_forward_stret_handler: ENTRY _method_invoke + // See if this is a small method. + testb $1, selector(%esp) + jnz L_method_invoke_small + + // We can directly load the IMP from big methods. movl selector(%esp), %ecx movl method_name(%ecx), %edx movl method_imp(%ecx), %eax movl %edx, selector(%esp) jmp *%eax - + +L_method_invoke_small: + // Small methods require a call to handle swizzling. + SAVE_REGS + + movl selector+4(%ebp), %eax + movl %eax, 0(%esp) + call __method_getImplementationAndName + RESTORE_REGS + movl %edx, selector(%esp) + jmp *%eax + END_ENTRY _method_invoke ENTRY _method_invoke_stret + // See if this is a small method. 
+ testb $1, selector_stret(%esp) + jnz L_method_invoke_stret_small + + // We can directly load the IMP from big methods. movl selector_stret(%esp), %ecx movl method_name(%ecx), %edx movl method_imp(%ecx), %eax movl %edx, selector_stret(%esp) jmp *%eax +L_method_invoke_stret_small: + // Small methods require a call to handle swizzling. + SAVE_REGS + + movl selector_stret+4(%ebp), %eax + movl %eax, 0(%esp) + call __method_getImplementationAndName + RESTORE_REGS + movl %edx, selector_stret(%esp) + jmp *%eax + END_ENTRY _method_invoke_stret diff --git a/runtime/Messengers.subproj/objc-msg-simulator-x86_64.S b/runtime/Messengers.subproj/objc-msg-simulator-x86_64.S index a5410c4..402b97d 100644 --- a/runtime/Messengers.subproj/objc-msg-simulator-x86_64.S +++ b/runtime/Messengers.subproj/objc-msg-simulator-x86_64.S @@ -22,7 +22,7 @@ */ #include -#if __x86_64__ && TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC +#if __x86_64__ && TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST /******************************************************************** ******************************************************************** @@ -93,6 +93,7 @@ _objc_restartableRanges: #define a2b sil #define a3 rdx #define a3d edx +#define a3b dl #define a4 rcx #define a4d ecx #define a5 r8 @@ -132,6 +133,10 @@ _objc_restartableRanges: #define GETIMP 101 #define LOOKUP 102 +#define MSGSEND 200 +#define METHOD_INVOKE 201 +#define METHOD_INVOKE_STRET 202 + /******************************************************************** * @@ -212,6 +217,88 @@ LExit$0: #define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves +////////////////////////////////////////////////////////////////////// +// +// SAVE_REGS +// +// Create a stack frame and save all argument registers in preparation +// for a function call. +////////////////////////////////////////////////////////////////////// + +.macro SAVE_REGS kind + +.if \kind != MSGSEND && \kind != METHOD_INVOKE && \kind != METHOD_INVOKE_STRET +.abort Unknown kind. 
+.endif + push %rbp + mov %rsp, %rbp + + sub $0x80, %rsp + + movdqa %xmm0, -0x80(%rbp) + push %rax // might be xmm parameter count + movdqa %xmm1, -0x70(%rbp) + push %a1 + movdqa %xmm2, -0x60(%rbp) +.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET + push %a2 +.endif + movdqa %xmm3, -0x50(%rbp) +.if \kind == MSGSEND || \kind == METHOD_INVOKE + push %a3 +.endif + movdqa %xmm4, -0x40(%rbp) + push %a4 + movdqa %xmm5, -0x30(%rbp) + push %a5 + movdqa %xmm6, -0x20(%rbp) + push %a6 + movdqa %xmm7, -0x10(%rbp) +.if \kind == MSGSEND + push %r10 +.endif + +.endmacro + + +////////////////////////////////////////////////////////////////////// +// +// RESTORE_REGS +// +// Restore all argument registers and pop the stack frame created by +// SAVE_REGS. +////////////////////////////////////////////////////////////////////// + +.macro RESTORE_REGS kind + +.if \kind == MSGSEND + pop %r10 + orq $2, %r10 // for the sake of instrumentations, remember it was the slowpath +.endif + movdqa -0x80(%rbp), %xmm0 + pop %a6 + movdqa -0x70(%rbp), %xmm1 + pop %a5 + movdqa -0x60(%rbp), %xmm2 + pop %a4 + movdqa -0x50(%rbp), %xmm3 +.if \kind == MSGSEND || \kind == METHOD_INVOKE + pop %a3 +.endif + movdqa -0x40(%rbp), %xmm4 +.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET + pop %a2 +.endif + movdqa -0x30(%rbp), %xmm5 + pop %a1 + movdqa -0x20(%rbp), %xmm6 + pop %rax + movdqa -0x10(%rbp), %xmm7 + leave + +.endmacro + + ///////////////////////////////////////////////////////////////////// // // CacheLookup return-type, caller @@ -347,26 +434,7 @@ LExit$0: .macro MethodTableLookup - push %rbp - mov %rsp, %rbp - - sub $$0x80+8, %rsp // +8 for alignment - - movdqa %xmm0, -0x80(%rbp) - push %rax // might be xmm parameter count - movdqa %xmm1, -0x70(%rbp) - push %a1 - movdqa %xmm2, -0x60(%rbp) - push %a2 - movdqa %xmm3, -0x50(%rbp) - push %a3 - movdqa %xmm4, -0x40(%rbp) - push %a4 - movdqa %xmm5, -0x30(%rbp) - push %a5 - movdqa %xmm6, -0x20(%rbp) - push %a6 - movdqa %xmm7, -0x10(%rbp) + SAVE_REGS 
MSGSEND // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER) .if $0 == NORMAL @@ -383,29 +451,13 @@ LExit$0: // IMP is now in %rax movq %rax, %r11 - movdqa -0x80(%rbp), %xmm0 - pop %a6 - movdqa -0x70(%rbp), %xmm1 - pop %a5 - movdqa -0x60(%rbp), %xmm2 - pop %a4 - movdqa -0x50(%rbp), %xmm3 - pop %a3 - movdqa -0x40(%rbp), %xmm4 - pop %a2 - movdqa -0x30(%rbp), %xmm5 - pop %a1 - movdqa -0x20(%rbp), %xmm6 - pop %rax - movdqa -0x10(%rbp), %xmm7 + RESTORE_REGS MSGSEND .if $0 == NORMAL test %r11, %r11 // set ne for stret forwarding .else cmp %r11, %r11 // set eq for nonstret forwarding .endif - - leave .endmacro @@ -1104,19 +1156,49 @@ LCacheMiss: ENTRY _method_invoke + // See if this is a small method. + testb $1, %a2b + jnz L_method_invoke_small + + // We can directly load the IMP from big methods. movq method_imp(%a2), %r11 movq method_name(%a2), %a2 jmp *%r11 - + +L_method_invoke_small: + // Small methods require a call to handle swizzling. + SAVE_REGS METHOD_INVOKE + movq %a2, %a1 + call __method_getImplementationAndName + movq %rdx, %a2 + movq %rax, %r11 + RESTORE_REGS METHOD_INVOKE + jmp *%r11 + END_ENTRY _method_invoke ENTRY _method_invoke_stret + // See if this is a small method. + testb $1, %a3b + jnz L_method_invoke_stret_small + + // We can directly load the IMP from big methods. movq method_imp(%a3), %r11 movq method_name(%a3), %a3 jmp *%r11 - + +L_method_invoke_stret_small: + // Small methods require a call to handle swizzling. 
+ SAVE_REGS METHOD_INVOKE_STRET + movq %a3, %a1 + call __method_getImplementationAndName + movq %rdx, %a3 + movq %rax, %r11 + RESTORE_REGS METHOD_INVOKE_STRET + jmp *%r11 + END_ENTRY _method_invoke_stret diff --git a/runtime/Messengers.subproj/objc-msg-x86_64.S b/runtime/Messengers.subproj/objc-msg-x86_64.S index 8fc6d48..0b8eff7 100644 --- a/runtime/Messengers.subproj/objc-msg-x86_64.S +++ b/runtime/Messengers.subproj/objc-msg-x86_64.S @@ -22,7 +22,7 @@ */ #include -#if __x86_64__ && !(TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC) +#if __x86_64__ && !(TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST) #include "isa.h" @@ -102,6 +102,7 @@ _objc_restartableRanges: #define a2b sil #define a3 rdx #define a3d edx +#define a3b dl #define a4 rcx #define a4d ecx #define a5 r8 @@ -138,6 +139,10 @@ _objc_restartableRanges: #define GETIMP 101 #define LOOKUP 102 +#define MSGSEND 200 +#define METHOD_INVOKE 201 +#define METHOD_INVOKE_STRET 202 + /******************************************************************** * @@ -218,6 +223,88 @@ LExit$0: #define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves +////////////////////////////////////////////////////////////////////// +// +// SAVE_REGS +// +// Create a stack frame and save all argument registers in preparation +// for a function call. +////////////////////////////////////////////////////////////////////// + +.macro SAVE_REGS kind + +.if \kind != MSGSEND && \kind != METHOD_INVOKE && \kind != METHOD_INVOKE_STRET +.abort Unknown kind. 
+.endif + push %rbp + mov %rsp, %rbp + + sub $0x80, %rsp + + movdqa %xmm0, -0x80(%rbp) + push %rax // might be xmm parameter count + movdqa %xmm1, -0x70(%rbp) + push %a1 + movdqa %xmm2, -0x60(%rbp) +.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET + push %a2 +.endif + movdqa %xmm3, -0x50(%rbp) +.if \kind == MSGSEND || \kind == METHOD_INVOKE + push %a3 +.endif + movdqa %xmm4, -0x40(%rbp) + push %a4 + movdqa %xmm5, -0x30(%rbp) + push %a5 + movdqa %xmm6, -0x20(%rbp) + push %a6 + movdqa %xmm7, -0x10(%rbp) +.if \kind == MSGSEND + push %r10 +.endif + +.endmacro + + +////////////////////////////////////////////////////////////////////// +// +// RESTORE_REGS +// +// Restore all argument registers and pop the stack frame created by +// SAVE_REGS. +////////////////////////////////////////////////////////////////////// + +.macro RESTORE_REGS kind + +.if \kind == MSGSEND + pop %r10 + orq $2, %r10 // for the sake of instrumentations, remember it was the slowpath +.endif + movdqa -0x80(%rbp), %xmm0 + pop %a6 + movdqa -0x70(%rbp), %xmm1 + pop %a5 + movdqa -0x60(%rbp), %xmm2 + pop %a4 + movdqa -0x50(%rbp), %xmm3 +.if \kind == MSGSEND || \kind == METHOD_INVOKE + pop %a3 +.endif + movdqa -0x40(%rbp), %xmm4 +.if \kind == MSGSEND || \kind == METHOD_INVOKE_STRET + pop %a2 +.endif + movdqa -0x30(%rbp), %xmm5 + pop %a1 + movdqa -0x20(%rbp), %xmm6 + pop %rax + movdqa -0x10(%rbp), %xmm7 + leave + +.endmacro + + ///////////////////////////////////////////////////////////////////// // // CacheLookup return-type, caller, function @@ -382,26 +469,7 @@ LLookupEnd$2: .macro MethodTableLookup - push %rbp - mov %rsp, %rbp - - sub $$0x80+8, %rsp // +8 for alignment - - movdqa %xmm0, -0x80(%rbp) - push %rax // might be xmm parameter count - movdqa %xmm1, -0x70(%rbp) - push %a1 - movdqa %xmm2, -0x60(%rbp) - push %a2 - movdqa %xmm3, -0x50(%rbp) - push %a3 - movdqa %xmm4, -0x40(%rbp) - push %a4 - movdqa %xmm5, -0x30(%rbp) - push %a5 - movdqa %xmm6, -0x20(%rbp) - push %a6 - movdqa %xmm7, -0x10(%rbp) 
+ SAVE_REGS MSGSEND // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER) .if $0 == NORMAL @@ -418,29 +486,13 @@ LLookupEnd$2: // IMP is now in %rax movq %rax, %r11 - movdqa -0x80(%rbp), %xmm0 - pop %a6 - movdqa -0x70(%rbp), %xmm1 - pop %a5 - movdqa -0x60(%rbp), %xmm2 - pop %a4 - movdqa -0x50(%rbp), %xmm3 - pop %a3 - movdqa -0x40(%rbp), %xmm4 - pop %a2 - movdqa -0x30(%rbp), %xmm5 - pop %a1 - movdqa -0x20(%rbp), %xmm6 - pop %rax - movdqa -0x10(%rbp), %xmm7 + RESTORE_REGS MSGSEND .if $0 == NORMAL test %r11, %r11 // set ne for nonstret forwarding .else cmp %r11, %r11 // set eq for stret forwarding .endif - - leave .endmacro @@ -1216,19 +1268,49 @@ LCacheMiss_objc_msgLookupSuper2_stret: ENTRY _method_invoke + // See if this is a small method. + testb $1, %a2b + jnz L_method_invoke_small + + // We can directly load the IMP from big methods. movq method_imp(%a2), %r11 movq method_name(%a2), %a2 jmp *%r11 - + +L_method_invoke_small: + // Small methods require a call to handle swizzling. + SAVE_REGS METHOD_INVOKE + movq %a2, %a1 + call __method_getImplementationAndName + movq %rdx, %a2 + movq %rax, %r11 + RESTORE_REGS METHOD_INVOKE + jmp *%r11 + END_ENTRY _method_invoke ENTRY _method_invoke_stret + // See if this is a small method. + testb $1, %a3b + jnz L_method_invoke_stret_small + + // We can directly load the IMP from big methods. movq method_imp(%a3), %r11 movq method_name(%a3), %a3 jmp *%r11 - + +L_method_invoke_stret_small: + // Small methods require a call to handle swizzling. 
+ SAVE_REGS METHOD_INVOKE_STRET + movq %a3, %a1 + call __method_getImplementationAndName + movq %rdx, %a3 + movq %rax, %r11 + RESTORE_REGS METHOD_INVOKE_STRET + jmp *%r11 + END_ENTRY _method_invoke_stret diff --git a/runtime/NSObject-internal.h b/runtime/NSObject-internal.h index c23fbc2..978799a 100644 --- a/runtime/NSObject-internal.h +++ b/runtime/NSObject-internal.h @@ -123,6 +123,16 @@ struct magic_t { class AutoreleasePoolPage; struct AutoreleasePoolPageData { +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + struct AutoreleasePoolEntry { + uintptr_t ptr: 48; + uintptr_t count: 16; + + static const uintptr_t maxCount = 65535; // 2^16 - 1 + }; + static_assert((AutoreleasePoolEntry){ .ptr = MACH_VM_MAX_ADDRESS }.ptr == MACH_VM_MAX_ADDRESS, "MACH_VM_MAX_ADDRESS doesn't fit into AutoreleasePoolEntry::ptr!"); +#endif + magic_t const magic; __unsafe_unretained id *next; pthread_t const thread; diff --git a/runtime/NSObject.mm b/runtime/NSObject.mm index 982e4bb..6d2e14f 100644 --- a/runtime/NSObject.mm +++ b/runtime/NSObject.mm @@ -39,6 +39,12 @@ #include #include #include "NSObject-internal.h" +#include + +extern "C" { +#include +#include +} @interface NSInvocation - (SEL)selector; @@ -51,7 +57,13 @@ OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __buil OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child); OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth); OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat); +OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_begin_offset = sizeof(AutoreleasePoolPageData); #if __OBJC2__ +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS +OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = (AutoreleasePoolPageData::AutoreleasePoolEntry){ .ptr = ~(uintptr_t)0 }.ptr; +#else +OBJC_EXTERN 
const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = ~(uintptr_t)0; +#endif OBJC_EXTERN const uint32_t objc_class_abi_version = OBJC_CLASS_ABI_VERSION_MAX; #endif @@ -79,8 +91,42 @@ void _objc_setBadAllocHandler(id(*newHandler)(Class)) } +static id _initializeSwiftRefcountingThenCallRetain(id objc); +static void _initializeSwiftRefcountingThenCallRelease(id objc); + +explicit_atomic swiftRetain{&_initializeSwiftRefcountingThenCallRetain}; +explicit_atomic swiftRelease{&_initializeSwiftRefcountingThenCallRelease}; + +static void _initializeSwiftRefcounting() { + void *const token = dlopen("/usr/lib/swift/libswiftCore.dylib", RTLD_LAZY | RTLD_LOCAL); + ASSERT(token); + swiftRetain.store((id(*)(id))dlsym(token, "swift_retain"), memory_order_relaxed); + ASSERT(swiftRetain.load(memory_order_relaxed)); + swiftRelease.store((void(*)(id))dlsym(token, "swift_release"), memory_order_relaxed); + ASSERT(swiftRelease.load(memory_order_relaxed)); + dlclose(token); +} + +static id _initializeSwiftRefcountingThenCallRetain(id objc) { + _initializeSwiftRefcounting(); + return swiftRetain.load(memory_order_relaxed)(objc); +} + +static void _initializeSwiftRefcountingThenCallRelease(id objc) { + _initializeSwiftRefcounting(); + swiftRelease.load(memory_order_relaxed)(objc); +} + +namespace objc { + extern int PageCountWarning; +} + namespace { +#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR +uint32_t numFaults = 0; +#endif + // The order of these bits is important. #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0) #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit @@ -221,6 +267,23 @@ void SideTableLocksSucceedLocks(StripedMap& oldlocks) { } } +// Call out to the _setWeaklyReferenced method on obj, if implemented. 
+static void callSetWeaklyReferenced(id obj) { + if (!obj) + return; + + Class cls = obj->getIsa(); + + if (slowpath(cls->hasCustomRR() && !object_isClass(obj))) { + ASSERT(((objc_class *)cls)->isInitializing() || ((objc_class *)cls)->isInitialized()); + void (*setWeaklyReferenced)(id, SEL) = (void(*)(id, SEL)) + class_getMethodImplementation(cls, @selector(_setWeaklyReferenced)); + if ((IMP)setWeaklyReferenced != _objc_msgForward) { + (*setWeaklyReferenced)(obj, @selector(_setWeaklyReferenced)); + } + } +} + // // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block} // @@ -269,7 +332,7 @@ enum CrashIfDeallocating { DontCrashIfDeallocating = false, DoCrashIfDeallocating = true }; template + enum CrashIfDeallocating crashIfDeallocating> static id storeWeak(id *location, objc_object *newObj) { @@ -336,11 +399,11 @@ storeWeak(id *location, objc_object *newObj) if (haveNew) { newObj = (objc_object *) weak_register_no_lock(&newTable->weak_table, (id)newObj, location, - crashIfDeallocating); + crashIfDeallocating ? CrashIfDeallocating : ReturnNilIfDeallocating); // weak_register_no_lock returns nil if weak store should be rejected // Set is-weakly-referenced bit in refcount table. - if (newObj && !newObj->isTaggedPointer()) { + if (!newObj->isTaggedPointerOrNil()) { newObj->setWeaklyReferenced_nolock(); } @@ -353,6 +416,12 @@ storeWeak(id *location, objc_object *newObj) SideTable::unlockTwo(oldTable, newTable); + // This must be called without the locks held, as it can invoke + // arbitrary code. In particular, even if _setWeaklyReferenced + // is not implemented, resolveInstanceMethod: may be, and may + // call back into the weak reference machinery. 
+ callSetWeaklyReferenced((id)newObj); + return (id)newObj; } @@ -474,8 +543,7 @@ objc_loadWeakRetained(id *location) retry: // fixme std::atomic this load obj = *location; - if (!obj) return nil; - if (obj->isTaggedPointer()) return obj; + if (obj->isTaggedPointerOrNil()) return obj; table = &SideTables()[obj]; @@ -499,9 +567,12 @@ objc_loadWeakRetained(id *location) else { // Slow case. We must check for +initialize and call it outside // the lock if necessary in order to avoid deadlocks. + // Use lookUpImpOrForward so we can avoid the assert in + // class_getInstanceMethod, since we intentionally make this + // callout with the lock held. if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) { BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL)) - class_getMethodImplementation(cls, @selector(retainWeakReference)); + lookUpImpOrForwardTryCache(obj, @selector(retainWeakReference), cls); if ((IMP)tryRetain == _objc_msgForward) { result = nil; } @@ -572,9 +643,28 @@ objc_copyWeak(id *dst, id *src) void objc_moveWeak(id *dst, id *src) { - objc_copyWeak(dst, src); - objc_destroyWeak(src); + id obj; + SideTable *table; + +retry: + obj = *src; + if (obj == nil) { + *dst = nil; + return; + } + + table = &SideTables()[obj]; + table->lock(); + if (*src != obj) { + table->unlock(); + goto retry; + } + + weak_unregister_no_lock(&table->weak_table, obj, src); + weak_register_no_lock(&table->weak_table, obj, dst, DontCheckDeallocating); + *dst = obj; *src = nil; + table->unlock(); } @@ -611,6 +701,7 @@ private: static pthread_key_t const key = AUTORELEASE_POOL_KEY; static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing static size_t const COUNT = SIZE / sizeof(id); + static size_t const MAX_FAULTS = 2; // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is // pushed and it has never contained any objects. 
This saves memory @@ -643,13 +734,33 @@ private: #endif } + void checkTooMuchAutorelease() + { +#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + bool objcModeNoFaults = DisableFaults || getpid() == 1 || + !os_variant_has_internal_diagnostics("com.apple.obj-c"); + if (!objcModeNoFaults) { + if (depth+1 >= (uint32_t)objc::PageCountWarning && numFaults < MAX_FAULTS) { //depth is 0 when first page is allocated + os_fault_with_payload(OS_REASON_LIBSYSTEM, + OS_REASON_LIBSYSTEM_CODE_FAULT, + NULL, 0, "Large Autorelease Pool", 0); + numFaults++; + } + } +#endif + } + AutoreleasePoolPage(AutoreleasePoolPage *newParent) : AutoreleasePoolPageData(begin(), objc_thread_self(), newParent, newParent ? 1+newParent->depth : 0, newParent ? newParent->hiwat : 0) - { + { + if (objc::PageCountWarning != -1) { + checkTooMuchAutorelease(); + } + if (parent) { parent->check(); ASSERT(!parent->child); @@ -744,8 +855,49 @@ private: { ASSERT(!full()); unprotect(); - id *ret = next; // faster than `return next-1` because of aliasing + id *ret; + +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + if (!DisableAutoreleaseCoalescing || !DisableAutoreleaseCoalescingLRU) { + if (!DisableAutoreleaseCoalescingLRU) { + if (!empty() && (obj != POOL_BOUNDARY)) { + AutoreleasePoolEntry *topEntry = (AutoreleasePoolEntry *)next - 1; + for (uintptr_t offset = 0; offset < 4; offset++) { + AutoreleasePoolEntry *offsetEntry = topEntry - offset; + if (offsetEntry <= (AutoreleasePoolEntry*)begin() || *(id *)offsetEntry == POOL_BOUNDARY) { + break; + } + if (offsetEntry->ptr == (uintptr_t)obj && offsetEntry->count < AutoreleasePoolEntry::maxCount) { + if (offset > 0) { + AutoreleasePoolEntry found = *offsetEntry; + memmove(offsetEntry, offsetEntry + 1, offset * sizeof(*offsetEntry)); + *topEntry = found; + } + topEntry->count++; + ret = (id *)topEntry; // need to reset ret + goto done; + } + } + } + } else { + if (!empty() && (obj != POOL_BOUNDARY)) { + AutoreleasePoolEntry *prevEntry = (AutoreleasePoolEntry *)next - 1; + 
if (prevEntry->ptr == (uintptr_t)obj && prevEntry->count < AutoreleasePoolEntry::maxCount) { + prevEntry->count++; + ret = (id *)prevEntry; // need to reset ret + goto done; + } + } + } + } +#endif + ret = next; // faster than `return next-1` because of aliasing *next++ = obj; +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + // Make sure obj fits in the bits available for it + ASSERT(((AutoreleasePoolEntry *)ret)->ptr == (uintptr_t)obj); +#endif + done: protect(); return ret; } @@ -772,12 +924,28 @@ private: } page->unprotect(); +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + AutoreleasePoolEntry* entry = (AutoreleasePoolEntry*) --page->next; + + // create an obj with the zeroed out top byte and release that + id obj = (id)entry->ptr; + int count = (int)entry->count; // grab these before memset +#else id obj = *--page->next; +#endif memset((void*)page->next, SCRIBBLE, sizeof(*page->next)); page->protect(); if (obj != POOL_BOUNDARY) { +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + // release count+1 times since it is count of the additional + // autoreleases beyond the first one + for (int i = 0; i < count + 1; i++) { + objc_release(obj); + } +#else objc_release(obj); +#endif } } @@ -984,10 +1152,13 @@ private: public: static inline id autorelease(id obj) { - ASSERT(obj); - ASSERT(!obj->isTaggedPointer()); + ASSERT(!obj->isTaggedPointerOrNil()); id *dest __unused = autoreleaseFast(obj); +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || (id)((AutoreleasePoolEntry *)dest)->ptr == obj); +#else ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj); +#endif return obj; } @@ -1024,9 +1195,9 @@ public: _objc_inform_now_and_on_crash ("Invalid or prematurely-freed autorelease pool %p. " "Set a breakpoint on objc_autoreleasePoolInvalid to debug. " - "Proceeding anyway because the app is old " - "(SDK version " SDK_FORMAT "). Memory errors are likely.", - token, FORMAT_SDK(sdkVersion())); + "Proceeding anyway because the app is old. 
Memory errors " + "are likely.", + token); } objc_autoreleasePoolInvalid(token); } @@ -1127,8 +1298,19 @@ public: if (*p == POOL_BOUNDARY) { _objc_inform("[%p] ################ POOL %p", p, p); } else { - _objc_inform("[%p] %#16lx %s", +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + AutoreleasePoolEntry *entry = (AutoreleasePoolEntry *)p; + if (entry->count > 0) { + id obj = (id)entry->ptr; + _objc_inform("[%p] %#16lx %s autorelease count %u", + p, (unsigned long)obj, object_getClassName(obj), + entry->count + 1); + goto done; + } +#endif + _objc_inform("[%p] %#16lx %s", p, (unsigned long)*p, object_getClassName(*p)); + done:; } } } @@ -1161,6 +1343,20 @@ public: _objc_inform("##############"); } +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + __attribute__((noinline, cold)) + unsigned sumOfExtraReleases() + { + unsigned sumOfExtraReleases = 0; + for (id *p = begin(); p < next; p++) { + if (*p != POOL_BOUNDARY) { + sumOfExtraReleases += ((AutoreleasePoolEntry *)p)->count; + } + } + return sumOfExtraReleases; + } +#endif + __attribute__((noinline, cold)) static void printHiwat() { @@ -1168,16 +1364,29 @@ public: // Ignore high water marks under 256 to suppress noise. 
AutoreleasePoolPage *p = hotPage(); uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin()); - if (mark > p->hiwat && mark > 256) { + if (mark > p->hiwat + 256) { +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + unsigned sumOfExtraReleases = 0; +#endif for( ; p; p = p->parent) { p->unprotect(); p->hiwat = mark; p->protect(); + +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + sumOfExtraReleases += p->sumOfExtraReleases(); +#endif } _objc_inform("POOL HIGHWATER: new high water mark of %u " "pending releases for thread %p:", mark, objc_thread_self()); +#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS + if (sumOfExtraReleases > 0) { + _objc_inform("POOL HIGHWATER: extra sequential autoreleases of objects: %u", + sumOfExtraReleases); + } +#endif void *stack[128]; int count = backtrace(stack, sizeof(stack)/sizeof(stack[0])); @@ -1201,14 +1410,14 @@ public: NEVER_INLINE id objc_object::rootRetain_overflow(bool tryRetain) { - return rootRetain(tryRetain, true); + return rootRetain(tryRetain, RRVariant::Full); } NEVER_INLINE uintptr_t objc_object::rootRelease_underflow(bool performDealloc) { - return rootRelease(performDealloc, true); + return rootRelease(performDealloc, RRVariant::Full); } @@ -1317,7 +1526,7 @@ objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc, ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0); uintptr_t carry; - size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry); + size_t refcnt = addc(oldRefcnt, (extra_rc - 1) << SIDE_TABLE_RC_SHIFT, 0, &carry); if (carry) refcnt = SIDE_TABLE_RC_PINNED; if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING; if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED; @@ -1359,7 +1568,7 @@ objc_object::sidetable_addExtraRC_nolock(size_t delta_rc) // Move some retain counts from the side table to the isa field. // Returns the actual count subtracted, which may be less than the request. 
-size_t +objc_object::SidetableBorrow objc_object::sidetable_subExtraRC_nolock(size_t delta_rc) { ASSERT(isa.nonpointer); @@ -1368,7 +1577,7 @@ objc_object::sidetable_subExtraRC_nolock(size_t delta_rc) RefcountMap::iterator it = table.refcnts.find(this); if (it == table.refcnts.end() || it->second == 0) { // Side table retain count is zero. Can't borrow. - return 0; + return { 0, 0 }; } size_t oldRefcnt = it->second; @@ -1379,7 +1588,7 @@ objc_object::sidetable_subExtraRC_nolock(size_t delta_rc) size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT); ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow it->second = newRefcnt; - return delta_rc; + return { delta_rc, newRefcnt >> SIDE_TABLE_RC_SHIFT }; } @@ -1394,19 +1603,29 @@ objc_object::sidetable_getExtraRC_nolock() } +void +objc_object::sidetable_clearExtraRC_nolock() +{ + ASSERT(isa.nonpointer); + SideTable& table = SideTables()[this]; + RefcountMap::iterator it = table.refcnts.find(this); + table.refcnts.erase(it); +} + + // SUPPORT_NONPOINTER_ISA #endif id -objc_object::sidetable_retain() +objc_object::sidetable_retain(bool locked) { #if SUPPORT_NONPOINTER_ISA ASSERT(!isa.nonpointer); #endif SideTable& table = SideTables()[this]; - table.lock(); + if (!locked) table.lock(); size_t& refcntStorage = table.refcnts[this]; if (! 
(refcntStorage & SIDE_TABLE_RC_PINNED)) { refcntStorage += SIDE_TABLE_RC_ONE; @@ -1505,6 +1724,14 @@ objc_object::sidetable_isWeaklyReferenced() return result; } +#if OBJC_WEAK_FORMATION_CALLOUT_DEFINED +//Clients can dlsym() for this symbol to see if an ObjC supporting +//-_setWeaklyReferenced is present +OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout = 0; +static_assert(SUPPORT_NONPOINTER_ISA, "Weak formation callout must only be defined when nonpointer isa is supported."); +#else +static_assert(!SUPPORT_NONPOINTER_ISA, "If weak callout is not present then we must not support nonpointer isas."); +#endif void objc_object::sidetable_setWeaklyReferenced_nolock() @@ -1512,9 +1739,9 @@ objc_object::sidetable_setWeaklyReferenced_nolock() #if SUPPORT_NONPOINTER_ISA ASSERT(!isa.nonpointer); #endif - + SideTable& table = SideTables()[this]; - + table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED; } @@ -1523,7 +1750,7 @@ objc_object::sidetable_setWeaklyReferenced_nolock() // return uintptr_t instead of bool so that the various raw-isa // -release paths all return zero in eax uintptr_t -objc_object::sidetable_release(bool performDealloc) +objc_object::sidetable_release(bool locked, bool performDealloc) { #if SUPPORT_NONPOINTER_ISA ASSERT(!isa.nonpointer); @@ -1532,7 +1759,7 @@ objc_object::sidetable_release(bool performDealloc) bool do_dealloc = false; - table.lock(); + if (!locked) table.lock(); auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING); auto &refcnt = it.first->second; if (it.second) { @@ -1583,8 +1810,7 @@ __attribute__((aligned(16), flatten, noinline)) id objc_retain(id obj) { - if (!obj) return obj; - if (obj->isTaggedPointer()) return obj; + if (obj->isTaggedPointerOrNil()) return obj; return obj->retain(); } @@ -1593,8 +1819,7 @@ __attribute__((aligned(16), flatten, noinline)) void objc_release(id obj) { - if (!obj) return; - if (obj->isTaggedPointer()) return; + if (obj->isTaggedPointerOrNil()) return; return obj->release(); } 
@@ -1603,8 +1828,7 @@ __attribute__((aligned(16), flatten, noinline)) id objc_autorelease(id obj) { - if (!obj) return obj; - if (obj->isTaggedPointer()) return obj; + if (obj->isTaggedPointerOrNil()) return obj; return obj->autorelease(); } @@ -1694,8 +1918,7 @@ _objc_rootRelease(id obj) obj->rootRelease(); } - -// Call [cls alloc] or [cls allocWithZone:nil], with appropriate +// Call [cls alloc] or [cls allocWithZone:nil], with appropriate // shortcutting optimizations. static ALWAYS_INLINE id callAlloc(Class cls, bool checkNil, bool allocWithZone=false) @@ -1731,7 +1954,7 @@ objc_alloc(Class cls) } // Calls [cls allocWithZone:nil]. -id +id objc_allocWithZone(Class cls) { return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/); @@ -1750,7 +1973,7 @@ objc_opt_new(Class cls) { #if __OBJC2__ if (fastpath(cls && !cls->ISA()->hasCustomCore())) { - return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init]; + return [callAlloc(cls, false/*checkNil*/) init]; } #endif return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new)); @@ -1761,7 +1984,7 @@ id objc_opt_self(id obj) { #if __OBJC2__ - if (fastpath(!obj || obj->isTaggedPointer() || !obj->ISA()->hasCustomCore())) { + if (fastpath(obj->isTaggedPointerOrNil() || !obj->ISA()->hasCustomCore())) { return obj; } #endif @@ -1790,7 +2013,7 @@ objc_opt_isKindOfClass(id obj, Class otherClass) if (slowpath(!obj)) return NO; Class cls = obj->getIsa(); if (fastpath(!cls->hasCustomCore())) { - for (Class tcls = cls; tcls; tcls = tcls->superclass) { + for (Class tcls = cls; tcls; tcls = tcls->getSuperclass()) { if (tcls == otherClass) return YES; } return NO; @@ -1978,16 +2201,6 @@ void arr_init(void) _objc_associations_init(); } -#ifdef DARLING -// see libdispatch -#if __has_attribute(objc_nonlazy_class) -#define NONLAZY_CLASS __attribute__((objc_nonlazy_class)) -#define NONLAZY_CLASS_LOAD -#else -#define NONLAZY_CLASS -#define NONLAZY_CLASS_LOAD + (void)load {} -#endif -#endif #if SUPPORT_TAGGED_POINTERS @@ 
-1997,15 +2210,8 @@ void arr_init(void) @interface __NSUnrecognizedTaggedPointer : NSObject @end -#ifdef DARLING -NONLAZY_CLASS -#else __attribute__((objc_nonlazy_class)) -#endif @implementation __NSUnrecognizedTaggedPointer -#ifdef DARLING -NONLAZY_CLASS_LOAD -#endif -(id) retain { return self; } -(oneway void) release { } -(id) autorelease { return self; } @@ -2013,15 +2219,8 @@ NONLAZY_CLASS_LOAD #endif -#ifdef DARLING -NONLAZY_CLASS -#else __attribute__((objc_nonlazy_class)) -#endif @implementation NSObject -#ifdef DARLING -NONLAZY_CLASS_LOAD -#endif + (void)initialize { } @@ -2043,11 +2242,11 @@ NONLAZY_CLASS_LOAD } + (Class)superclass { - return self->superclass; + return self->getSuperclass(); } - (Class)superclass { - return [self class]->superclass; + return [self class]->getSuperclass(); } + (BOOL)isMemberOfClass:(Class)cls { @@ -2059,28 +2258,28 @@ NONLAZY_CLASS_LOAD } + (BOOL)isKindOfClass:(Class)cls { - for (Class tcls = self->ISA(); tcls; tcls = tcls->superclass) { + for (Class tcls = self->ISA(); tcls; tcls = tcls->getSuperclass()) { if (tcls == cls) return YES; } return NO; } - (BOOL)isKindOfClass:(Class)cls { - for (Class tcls = [self class]; tcls; tcls = tcls->superclass) { + for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) { if (tcls == cls) return YES; } return NO; } + (BOOL)isSubclassOfClass:(Class)cls { - for (Class tcls = self; tcls; tcls = tcls->superclass) { + for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) { if (tcls == cls) return YES; } return NO; } + (BOOL)isAncestorOfObject:(NSObject *)obj { - for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) { + for (Class tcls = [obj class]; tcls; tcls = tcls->getSuperclass()) { if (tcls == self) return YES; } return NO; @@ -2100,7 +2299,7 @@ NONLAZY_CLASS_LOAD + (BOOL)conformsToProtocol:(Protocol *)protocol { if (!protocol) return NO; - for (Class tcls = self; tcls; tcls = tcls->superclass) { + for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) { 
if (class_conformsToProtocol(tcls, protocol)) return YES; } return NO; @@ -2108,7 +2307,7 @@ NONLAZY_CLASS_LOAD - (BOOL)conformsToProtocol:(Protocol *)protocol { if (!protocol) return NO; - for (Class tcls = [self class]; tcls; tcls = tcls->superclass) { + for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) { if (class_conformsToProtocol(tcls, protocol)) return YES; } return NO; diff --git a/runtime/PointerUnion.h b/runtime/PointerUnion.h index 85b7846..1108673 100644 --- a/runtime/PointerUnion.h +++ b/runtime/PointerUnion.h @@ -59,12 +59,12 @@ struct PointerUnionTypeSelectorReturn< typename PointerUnionTypeSelector::Return; }; -template +template class PointerUnion { uintptr_t _value; - static_assert(alignof(PT1) >= 2, "alignment requirement"); - static_assert(alignof(PT2) >= 2, "alignment requirement"); + static_assert(alignof(T1) >= 2, "alignment requirement"); + static_assert(alignof(T2) >= 2, "alignment requirement"); struct IsPT1 { static const uintptr_t Num = 0; @@ -85,8 +85,12 @@ public: explicit PointerUnion(const std::atomic &raw) : _value(raw.load(std::memory_order_relaxed)) { } - PointerUnion(PT1 t) : _value((uintptr_t)t) { } - PointerUnion(PT2 t) : _value((uintptr_t)t | 1) { } + PointerUnion(T1 *t, const void *address) { + _value = (uintptr_t)Auth1::sign(t, address); + } + PointerUnion(T2 *t, const void *address) { + _value = (uintptr_t)Auth2::sign(t, address) | 1; + } void storeAt(std::atomic &raw, std::memory_order order) const { raw.store(_value, order); @@ -94,20 +98,24 @@ public: template bool is() const { - using Ty = typename PointerUnionTypeSelector>>::Return; return getTag() == Ty::Num; } - template T get() const { - ASSERT(is() && "Invalid accessor called"); - return reinterpret_cast(getPointer()); + template T get(const void *address) const { + ASSERT(is() && "Invalid accessor called"); + using AuthT = typename PointerUnionTypeSelector>>::Return; + + return AuthT::auth((T)getPointer(), address); } - template T dyn_cast() 
const { + template T dyn_cast(const void *address) const { if (is()) - return get(); + return get(address); return T(); } }; diff --git a/runtime/Protocol.mm b/runtime/Protocol.mm index d5c4873..9432267 100644 --- a/runtime/Protocol.mm +++ b/runtime/Protocol.mm @@ -45,47 +45,21 @@ // by CF, so __IncompleteProtocol would be left without an R/R implementation // otherwise, which would break ARC. -#ifdef DARLING -// see libdispatch -#if __has_attribute(objc_nonlazy_class) -#define NONLAZY_CLASS __attribute__((objc_nonlazy_class)) -#define NONLAZY_CLASS_LOAD -#else -#define NONLAZY_CLASS -#define NONLAZY_CLASS_LOAD + (void)load {} -#endif -#endif - @interface __IncompleteProtocol : NSObject @end #if __OBJC2__ -#ifdef DARLING -NONLAZY_CLASS -#else __attribute__((objc_nonlazy_class)) #endif -#endif @implementation __IncompleteProtocol -#ifdef DARLING -NONLAZY_CLASS_LOAD -#endif @end #if __OBJC2__ -#ifdef DARLING -NONLAZY_CLASS -#else __attribute__((objc_nonlazy_class)) #endif -#endif @implementation Protocol -#ifdef DARLING -NONLAZY_CLASS_LOAD -#endif - - (BOOL) conformsTo: (Protocol *)aProtocolObj { return protocol_conformsToProtocol(self, aProtocolObj); @@ -126,7 +100,7 @@ NONLAZY_CLASS_LOAD // check isKindOf: Class cls; Class protoClass = objc_getClass("Protocol"); - for (cls = object_getClass(other); cls; cls = cls->superclass) { + for (cls = object_getClass(other); cls; cls = cls->getSuperclass()) { if (cls == protoClass) break; } if (!cls) return NO; diff --git a/runtime/arm64-asm.h b/runtime/arm64-asm.h index fb15e5e..a6f7d38 100644 --- a/runtime/arm64-asm.h +++ b/runtime/arm64-asm.h @@ -28,6 +28,8 @@ #if __arm64__ +#include "objc-config.h" + #if __LP64__ // true arm64 @@ -129,11 +131,35 @@ // note: assumes the imp is not nil eor $1, $1, $2 // mix SEL into ptrauth modifier eor $1, $1, $3 // mix isa into ptrauth modifier - autib $0, $1 // authenticate cached imp + autib $0, $1 // authenticate cached imp ldr xzr, [$0] // crash if authentication failed paciza $0 // 
resign cached imp as IMP .endmacro +.macro ExtractISA + and $0, $1, #ISA_MASK +#if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_STRIP + xpacd $0 +#elif ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH + mov x10, $2 + movk x10, #ISA_SIGNING_DISCRIMINATOR, LSL #48 + autda $0, x10 +#endif +.endmacro + +.macro AuthISASuper dst, addr_mutable, discriminator +#if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH + movk \addr_mutable, #\discriminator, LSL #48 + autda \dst, \addr_mutable +#elif ISA_SIGNING_AUTH_MODE == ISA_SIGNING_STRIP + xpacd \dst +#endif +.endmacro + +.macro SignAsImp + paciza $0 +.endmacro + // JOP #else // not JOP @@ -162,7 +188,14 @@ .macro AuthAndResignAsIMP // $0 = cached imp, $1 = address of cached imp, $2 = SEL eor $0, $0, $3 -.endmacro +.endmacro + +.macro SignAsImp +.endmacro + +.macro ExtractISA + and $0, $1, #ISA_MASK +.endmacro // not JOP #endif diff --git a/runtime/dummy-library-mac-i386.c b/runtime/dummy-library-mac-i386.c new file mode 100644 index 0000000..a8cd20f --- /dev/null +++ b/runtime/dummy-library-mac-i386.c @@ -0,0 +1,356 @@ +// This file contains stubs matching the symbols previously exported by libobjc +// when i386 Mac was actually supported. These stubs allow us to tease apart the +// dependencies to prepare for removing i386 Mac libobjc entirely. +// +// This file is not built when building for any other arch/OS combination. When +// building for i386 Mac, no other source files are built, just this one. This +// is handled using the Included/Excluded Source File Names settings in Xcode, +// with arch/OS-specific overrides. 
+// +// rdar://problem/58541885 + +#pragma GCC visibility push(default) +const char ___ld_hide_os10_5__objc_class_name_NSObject __asm__("$ld$hide$os10.5$.objc_class_name_NSObject"); +const char ___ld_hide_os10_6__objc_class_name_NSObject __asm__("$ld$hide$os10.6$.objc_class_name_NSObject"); +const char ___ld_hide_os10_7__objc_class_name_NSObject __asm__("$ld$hide$os10.7$.objc_class_name_NSObject"); +const char ___objc_class_name_List __asm__(".objc_class_name_List"); +const char ___objc_class_name_NSObject __asm__(".objc_class_name_NSObject"); +const char ___objc_class_name_Object __asm__(".objc_class_name_Object"); +const char ___objc_class_name_Protocol __asm__(".objc_class_name_Protocol"); +void NXCompareHashTables(void) {} +void NXCompareMapTables(void) {} +void NXCopyHashTable(void) {} +void NXCopyStringBuffer(void) {} +void NXCopyStringBufferFromZone(void) {} +void NXCountHashTable(void) {} +void NXCountMapTable(void) {} +void NXCreateHashTable(void) {} +void NXCreateHashTableFromZone(void) {} +void NXCreateMapTable(void) {} +void NXCreateMapTableFromZone(void) {} +void NXEmptyHashTable(void) {} +void NXFreeHashTable(void) {} +void NXFreeMapTable(void) {} +void NXHashGet(void) {} +void NXHashInsert(void) {} +void NXHashInsertIfAbsent(void) {} +void NXHashMember(void) {} +void NXHashRemove(void) {} +void NXInitHashState(void) {} +void NXInitMapState(void) {} +void NXMapGet(void) {} +void NXMapInsert(void) {} +void NXMapMember(void) {} +void NXMapRemove(void) {} +void NXNextHashState(void) {} +void NXNextMapState(void) {} +void NXNoEffectFree(void) {} +const char NXObjectMapPrototype; +void NXPtrHash(void) {} +void NXPtrIsEqual(void) {} +const char NXPtrPrototype; +const char NXPtrStructKeyPrototype; +const char NXPtrValueMapPrototype; +void NXReallyFree(void) {} +void NXResetHashTable(void) {} +void NXResetMapTable(void) {} +void NXStrHash(void) {} +void NXStrIsEqual(void) {} +const char NXStrPrototype; +const char NXStrStructKeyPrototype; +const char 
NXStrValueMapPrototype; +void NXUniqueString(void) {} +void NXUniqueStringNoCopy(void) {} +void NXUniqueStringWithLength(void) {} +char _alloc; +void _class_getIvarMemoryManagement(void) {} +void _class_isFutureClass(void) {} +void _class_isSwift(void) {} +char _copy; +char _dealloc; +char _error; +void _objcInit(void) {} +void _objc_addWillInitializeClassFunc(void) {} +void _objc_atfork_child(void) {} +void _objc_atfork_parent(void) {} +void _objc_atfork_prepare(void) {} +void _objc_autoreleasePoolPop(void) {} +void _objc_autoreleasePoolPrint(void) {} +void _objc_autoreleasePoolPush(void) {} +void _objc_deallocOnMainThreadHelper(void) {} +const char _objc_debug_class_hash; +const char _objc_empty_cache; +void _objc_error(void) {} +void _objc_flush_caches(void) {} +void _objc_getFreedObjectClass(void) {} +void _objc_init(void) {} +void _objc_msgForward(void) {} +void _objc_msgForward_stret(void) {} +void _objc_resolve_categories_for_class(void) {} +void _objc_rootAlloc(void) {} +void _objc_rootAllocWithZone(void) {} +void _objc_rootAutorelease(void) {} +void _objc_rootDealloc(void) {} +void _objc_rootFinalize(void) {} +void _objc_rootHash(void) {} +void _objc_rootInit(void) {} +void _objc_rootIsDeallocating(void) {} +void _objc_rootRelease(void) {} +void _objc_rootReleaseWasZero(void) {} +void _objc_rootRetain(void) {} +void _objc_rootRetainCount(void) {} +void _objc_rootTryRetain(void) {} +void _objc_rootZone(void) {} +void _objc_setBadAllocHandler(void) {} +void _objc_setClassLoader(void) {} +void _protocol_getMethodTypeEncoding(void) {} +char _realloc; +char _zoneAlloc; +char _zoneCopy; +char _zoneRealloc; +void class_addIvar(void) {} +void class_addMethod(void) {} +void class_addMethods(void) {} +void class_addProperty(void) {} +void class_addProtocol(void) {} +void class_conformsToProtocol(void) {} +void class_copyIvarList(void) {} +void class_copyMethodList(void) {} +void class_copyPropertyList(void) {} +void class_copyProtocolList(void) {} +void 
class_createInstance(void) {} +void class_createInstanceFromZone(void) {} +void class_createInstances(void) {} +void class_getClassMethod(void) {} +void class_getClassVariable(void) {} +void class_getImageName(void) {} +void class_getInstanceMethod(void) {} +void class_getInstanceSize(void) {} +void class_getInstanceVariable(void) {} +void class_getIvarLayout(void) {} +void class_getMethodImplementation(void) {} +void class_getMethodImplementation_stret(void) {} +void class_getName(void) {} +void class_getProperty(void) {} +void class_getSuperclass(void) {} +void class_getVersion(void) {} +void class_getWeakIvarLayout(void) {} +void class_isMetaClass(void) {} +void class_lookupMethod(void) {} +void class_nextMethodList(void) {} +void class_poseAs(void) {} +void class_removeMethods(void) {} +void class_replaceMethod(void) {} +void class_replaceProperty(void) {} +void class_respondsToMethod(void) {} +void class_respondsToSelector(void) {} +void class_setIvarLayout(void) {} +void class_setSuperclass(void) {} +void class_setVersion(void) {} +void class_setWeakIvarLayout(void) {} +void gdb_class_getClass(void) {} +void gdb_object_getClass(void) {} +void imp_getBlock(void) {} +void imp_implementationWithBlock(void) {} +void imp_removeBlock(void) {} +void instrumentObjcMessageSends(void) {} +void ivar_getName(void) {} +void ivar_getOffset(void) {} +void ivar_getTypeEncoding(void) {} +void method_copyArgumentType(void) {} +void method_copyReturnType(void) {} +void method_exchangeImplementations(void) {} +void method_getArgumentType(void) {} +void method_getDescription(void) {} +void method_getImplementation(void) {} +void method_getName(void) {} +void method_getNumberOfArguments(void) {} +void method_getReturnType(void) {} +void method_getSizeOfArguments(void) {} +void method_getTypeEncoding(void) {} +void method_invoke(void) {} +void method_invoke_stret(void) {} +void method_setImplementation(void) {} +void objc_addClass(void) {} +void objc_addLoadImageFunc(void) {} +void 
objc_alloc(void) {} +void objc_allocWithZone(void) {} +void objc_alloc_init(void) {} +void objc_allocateClassPair(void) {} +void objc_allocateProtocol(void) {} +void objc_allocate_object(void) {} +void objc_appRequiresGC(void) {} +void objc_assertRegisteredThreadWithCollector(void) {} +void objc_assign_global(void) {} +void objc_assign_ivar(void) {} +void objc_assign_strongCast(void) {} +void objc_assign_threadlocal(void) {} +void objc_assign_weak(void) {} +void objc_atomicCompareAndSwapGlobal(void) {} +void objc_atomicCompareAndSwapGlobalBarrier(void) {} +void objc_atomicCompareAndSwapInstanceVariable(void) {} +void objc_atomicCompareAndSwapInstanceVariableBarrier(void) {} +void objc_atomicCompareAndSwapPtr(void) {} +void objc_atomicCompareAndSwapPtrBarrier(void) {} +void objc_autorelease(void) {} +void objc_autoreleasePoolPop(void) {} +void objc_autoreleasePoolPush(void) {} +void objc_autoreleaseReturnValue(void) {} +void objc_clear_deallocating(void) {} +void objc_clear_stack(void) {} +void objc_collect(void) {} +void objc_collect_init(void) {} +void objc_collectableZone(void) {} +void objc_collectingEnabled(void) {} +void objc_collecting_enabled(void) {} +void objc_constructInstance(void) {} +void objc_copyClassList(void) {} +void objc_copyClassNamesForImage(void) {} +void objc_copyClassNamesForImageHeader(void) {} +void objc_copyCppObjectAtomic(void) {} +void objc_copyImageNames(void) {} +void objc_copyProtocolList(void) {} +void objc_copyStruct(void) {} +void objc_copyWeak(void) {} +const char objc_debug_autoreleasepoolpage_child_offset; +const char objc_debug_autoreleasepoolpage_depth_offset; +const char objc_debug_autoreleasepoolpage_hiwat_offset; +const char objc_debug_autoreleasepoolpage_magic_offset; +const char objc_debug_autoreleasepoolpage_next_offset; +const char objc_debug_autoreleasepoolpage_parent_offset; +const char objc_debug_autoreleasepoolpage_thread_offset; +void objc_destroyWeak(void) {} +void objc_destructInstance(void) {} +void 
objc_disposeClassPair(void) {} +void objc_dumpHeap(void) {} +void objc_duplicateClass(void) {} +void objc_enumerationMutation(void) {} +void objc_exception_extract(void) {} +void objc_exception_get_functions(void) {} +void objc_exception_match(void) {} +void objc_exception_set_functions(void) {} +void objc_exception_throw(void) {} +void objc_exception_try_enter(void) {} +void objc_exception_try_exit(void) {} +void objc_finalizeOnMainThread(void) {} +void objc_getAssociatedObject(void) {} +void objc_getClass(void) {} +void objc_getClassList(void) {} +void objc_getClasses(void) {} +void objc_getFutureClass(void) {} +void objc_getMetaClass(void) {} +void objc_getOrigClass(void) {} +void objc_getProperty(void) {} +void objc_getProtocol(void) {} +void objc_getRequiredClass(void) {} +void objc_initWeak(void) {} +void objc_initWeakOrNil(void) {} +void objc_initializeClassPair(void) {} +void objc_isAuto(void) {} +void objc_is_finalized(void) {} +void objc_loadModule(void) {} +void objc_loadModules(void) {} +void objc_loadWeak(void) {} +void objc_loadWeakRetained(void) {} +void objc_lookUpClass(void) {} +void objc_memmove_collectable(void) {} +void objc_moveWeak(void) {} +void objc_msgSend(void) {} +void objc_msgSendSuper(void) {} +void objc_msgSendSuper_stret(void) {} +void objc_msgSend_fpret(void) {} +void objc_msgSend_stret(void) {} +void objc_msgSendv(void) {} +void objc_msgSendv_fpret(void) {} +void objc_msgSendv_stret(void) {} +void objc_opt_class(void) {} +void objc_opt_isKindOfClass(void) {} +void objc_opt_new(void) {} +void objc_opt_respondsToSelector(void) {} +void objc_opt_self(void) {} +void objc_read_weak(void) {} +void objc_registerClassPair(void) {} +void objc_registerProtocol(void) {} +void objc_registerThreadWithCollector(void) {} +void objc_release(void) {} +void objc_removeAssociatedObjects(void) {} +void objc_retain(void) {} +void objc_retainAutorelease(void) {} +void objc_retainAutoreleaseReturnValue(void) {} +void 
objc_retainAutoreleasedReturnValue(void) {} +void objc_retainBlock(void) {} +void objc_retain_autorelease(void) {} +void objc_retainedObject(void) {} +void objc_setAssociatedObject(void) {} +void objc_setClassHandler(void) {} +void objc_setCollectionRatio(void) {} +void objc_setCollectionThreshold(void) {} +void objc_setEnumerationMutationHandler(void) {} +void objc_setForwardHandler(void) {} +void objc_setHook_getImageName(void) {} +void objc_setMultithreaded(void) {} +void objc_setProperty(void) {} +void objc_setProperty_atomic(void) {} +void objc_setProperty_atomic_copy(void) {} +void objc_setProperty_nonatomic(void) {} +void objc_setProperty_nonatomic_copy(void) {} +void objc_set_collection_ratio(void) {} +void objc_set_collection_threshold(void) {} +void objc_should_deallocate(void) {} +void objc_startCollectorThread(void) {} +void objc_start_collector_thread(void) {} +void objc_storeStrong(void) {} +void objc_storeWeak(void) {} +void objc_storeWeakOrNil(void) {} +void objc_sync_enter(void) {} +void objc_sync_exit(void) {} +void objc_sync_try_enter(void) {} +void objc_unloadModules(void) {} +void objc_unregisterThreadWithCollector(void) {} +void objc_unretainedObject(void) {} +void objc_unretainedPointer(void) {} +void objc_unsafeClaimAutoreleasedReturnValue(void) {} +void object_copy(void) {} +void object_copyFromZone(void) {} +void object_dispose(void) {} +void object_getClass(void) {} +void object_getClassName(void) {} +void object_getIndexedIvars(void) {} +void object_getInstanceVariable(void) {} +void object_getIvar(void) {} +void object_getMethodImplementation(void) {} +void object_getMethodImplementation_stret(void) {} +void object_isClass(void) {} +void object_realloc(void) {} +void object_reallocFromZone(void) {} +void object_setClass(void) {} +void object_setInstanceVariable(void) {} +void object_setInstanceVariableWithStrongDefault(void) {} +void object_setIvar(void) {} +void object_setIvarWithStrongDefault(void) {} +void 
property_copyAttributeList(void) {} +void property_copyAttributeValue(void) {} +void property_getAttributes(void) {} +void property_getName(void) {} +void protocol_addMethodDescription(void) {} +void protocol_addProperty(void) {} +void protocol_addProtocol(void) {} +void protocol_conformsToProtocol(void) {} +void protocol_copyMethodDescriptionList(void) {} +void protocol_copyPropertyList(void) {} +void protocol_copyPropertyList2(void) {} +void protocol_copyProtocolList(void) {} +void protocol_getMethodDescription(void) {} +void protocol_getName(void) {} +void protocol_getProperty(void) {} +void protocol_isEqual(void) {} +void sel_getName(void) {} +void sel_getUid(void) {} +void sel_isEqual(void) {} +void sel_isMapped(void) {} +void sel_registerName(void) {} +void objc_cache_buckets(void) {} +void objc_cache_bytesForCapacity(void) {} +void objc_cache_capacity(void) {} +void objc_cache_occupied(void) {} +void objc_copyClassesForImage(void) {} diff --git a/runtime/dummy.c b/runtime/dummy.c deleted file mode 100644 index e69de29..0000000 diff --git a/runtime/isa.h b/runtime/isa.h index b4741cb..8b552c2 100644 --- a/runtime/isa.h +++ b/runtime/isa.h @@ -55,26 +55,46 @@ // uintptr_t extraBytes : 1; // allocated with extra bytes # if __arm64__ -# define ISA_MASK 0x0000000ffffffff8ULL -# define ISA_MAGIC_MASK 0x000003f000000001ULL -# define ISA_MAGIC_VALUE 0x000001a000000001ULL -# define ISA_BITFIELD \ - uintptr_t nonpointer : 1; \ - uintptr_t has_assoc : 1; \ - uintptr_t has_cxx_dtor : 1; \ - uintptr_t shiftcls : 33; /*MACH_VM_MAX_ADDRESS 0x1000000000*/ \ - uintptr_t magic : 6; \ - uintptr_t weakly_referenced : 1; \ - uintptr_t deallocating : 1; \ - uintptr_t has_sidetable_rc : 1; \ - uintptr_t extra_rc : 19 -# define RC_ONE (1ULL<<45) -# define RC_HALF (1ULL<<18) +// ARM64 simulators have a larger address space, so use the ARM64e +// scheme even when simulators build for ARM64-not-e. 
+# if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR +# define ISA_MASK 0x007ffffffffffff8ULL +# define ISA_MAGIC_MASK 0x0000000000000001ULL +# define ISA_MAGIC_VALUE 0x0000000000000001ULL +# define ISA_HAS_CXX_DTOR_BIT 0 +# define ISA_BITFIELD \ + uintptr_t nonpointer : 1; \ + uintptr_t has_assoc : 1; \ + uintptr_t weakly_referenced : 1; \ + uintptr_t shiftcls_and_sig : 52; \ + uintptr_t has_sidetable_rc : 1; \ + uintptr_t extra_rc : 8 +# define RC_ONE (1ULL<<56) +# define RC_HALF (1ULL<<7) +# else +# define ISA_MASK 0x0000000ffffffff8ULL +# define ISA_MAGIC_MASK 0x000003f000000001ULL +# define ISA_MAGIC_VALUE 0x000001a000000001ULL +# define ISA_HAS_CXX_DTOR_BIT 1 +# define ISA_BITFIELD \ + uintptr_t nonpointer : 1; \ + uintptr_t has_assoc : 1; \ + uintptr_t has_cxx_dtor : 1; \ + uintptr_t shiftcls : 33; /*MACH_VM_MAX_ADDRESS 0x1000000000*/ \ + uintptr_t magic : 6; \ + uintptr_t weakly_referenced : 1; \ + uintptr_t unused : 1; \ + uintptr_t has_sidetable_rc : 1; \ + uintptr_t extra_rc : 19 +# define RC_ONE (1ULL<<45) +# define RC_HALF (1ULL<<18) +# endif # elif __x86_64__ # define ISA_MASK 0x00007ffffffffff8ULL # define ISA_MAGIC_MASK 0x001f800000000001ULL # define ISA_MAGIC_VALUE 0x001d800000000001ULL +# define ISA_HAS_CXX_DTOR_BIT 1 # define ISA_BITFIELD \ uintptr_t nonpointer : 1; \ uintptr_t has_assoc : 1; \ @@ -82,7 +102,7 @@ uintptr_t shiftcls : 44; /*MACH_VM_MAX_ADDRESS 0x7fffffe00000*/ \ uintptr_t magic : 6; \ uintptr_t weakly_referenced : 1; \ - uintptr_t deallocating : 1; \ + uintptr_t unused : 1; \ uintptr_t has_sidetable_rc : 1; \ uintptr_t extra_rc : 8 # define RC_ONE (1ULL<<56) @@ -109,6 +129,7 @@ # define ISA_INDEX_COUNT (1 << ISA_INDEX_BITS) # define ISA_INDEX_MAGIC_MASK 0x001E0001 # define ISA_INDEX_MAGIC_VALUE 0x001C0001 +# define ISA_HAS_CXX_DTOR_BIT 1 # define ISA_BITFIELD \ uintptr_t nonpointer : 1; \ uintptr_t has_assoc : 1; \ @@ -116,7 +137,7 @@ uintptr_t magic : 4; \ uintptr_t has_cxx_dtor : 1; \ uintptr_t weakly_referenced : 1; \ - 
uintptr_t deallocating : 1; \ + uintptr_t unused : 1; \ uintptr_t has_sidetable_rc : 1; \ uintptr_t extra_rc : 7 # define RC_ONE (1ULL<<25) diff --git a/runtime/objc-abi.h b/runtime/objc-abi.h index 18430df..937a4be 100644 --- a/runtime/objc-abi.h +++ b/runtime/objc-abi.h @@ -46,7 +46,7 @@ /* Linker metadata symbols */ // NSObject was in Foundation/CF on macOS < 10.8. -#if TARGET_OS_OSX +#if TARGET_OS_OSX && (__x86_64__ || __i386__) #if __OBJC2__ OBJC_EXPORT const char __objc_nsobject_class_10_5 @@ -171,6 +171,15 @@ HasClassProperties: Old ABI: Set by some compilers. Not used by the runtime. */ +// Description of an expected duplicate class name. +// __DATA,__objc_dupclass stores one of these. Only the main image is +// consulted for these purposes. +typedef struct _objc_duplicate_class { + uint32_t version; + uint32_t flags; + const char name[64]; +} objc_duplicate_class; +#define OBJC_HAS_DUPLICATE_CLASS 1 /* Properties */ @@ -412,7 +421,7 @@ objc_retainBlock(id _Nullable) // Extract class pointer from an isa field. -#if TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC +#if TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__ // No simulators use nonpointer isa yet. #elif __LP64__ diff --git a/runtime/objc-api.h b/runtime/objc-api.h index 284f24f..26b30bf 100644 --- a/runtime/objc-api.h +++ b/runtime/objc-api.h @@ -118,6 +118,12 @@ # define NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER 1 #endif +/* The arm64 ABI requires proper casting to ensure arguments are passed + * * correctly. */ +#if defined(__arm64__) && !__swift__ +# undef OBJC_OLD_DISPATCH_PROTOTYPES +# define OBJC_OLD_DISPATCH_PROTOTYPES 0 +#endif /* OBJC_OLD_DISPATCH_PROTOTYPES == 0 enforces the rule that the dispatch * functions must be cast to an appropriate function pointer type. 
*/ diff --git a/runtime/objc-block-trampolines.mm b/runtime/objc-block-trampolines.mm index 9dea652..f905d35 100644 --- a/runtime/objc-block-trampolines.mm +++ b/runtime/objc-block-trampolines.mm @@ -57,6 +57,16 @@ # define TrampolinePtrauth #endif +// A page of trampolines is as big as the maximum supported page size +// everywhere except i386. i386 only exists for the watch simulator +// now, and we know it really only has 4kB pages. Also see comments +// below about PAGE_SIZE and PAGE_MAX_SIZE. +#ifdef __i386__ +#define TRAMPOLINE_PAGE_SIZE PAGE_MIN_SIZE +#else +#define TRAMPOLINE_PAGE_SIZE PAGE_MAX_SIZE +#endif + class TrampolinePointerWrapper { struct TrampolinePointers { class TrampolineAddress { @@ -103,22 +113,22 @@ class TrampolinePointerWrapper { void check() { #if DEBUG - ASSERT(impl.address() == textSegment + PAGE_MAX_SIZE); - ASSERT(impl.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE - assert(impl.address() + PAGE_MAX_SIZE == + ASSERT(impl.address() == textSegment + TRAMPOLINE_PAGE_SIZE); + ASSERT(impl.address() % PAGE_SIZE == 0); // not TRAMPOLINE_PAGE_SIZE + ASSERT(impl.address() + TRAMPOLINE_PAGE_SIZE == last.address() + SLOT_SIZE); ASSERT(last.address()+8 < textSegment + textSegmentSize); ASSERT((last.address() - start.address()) % SLOT_SIZE == 0); # if SUPPORT_STRET - ASSERT(impl_stret.address() == textSegment + 2*PAGE_MAX_SIZE); - ASSERT(impl_stret.address() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE - assert(impl_stret.address() + PAGE_MAX_SIZE == + ASSERT(impl_stret.address() == textSegment + 2*TRAMPOLINE_PAGE_SIZE); + ASSERT(impl_stret.address() % PAGE_SIZE == 0); // not TRAMPOLINE_PAGE_SIZE + ASSERT(impl_stret.address() + TRAMPOLINE_PAGE_SIZE == last_stret.address() + SLOT_SIZE); - assert(start.address() - impl.address() == + ASSERT(start.address() - impl.address() == start_stret.address() - impl_stret.address()); - assert(last_stret.address() + SLOT_SIZE < + ASSERT(last_stret.address() + SLOT_SIZE < textSegment + textSegmentSize); - 
assert((last_stret.address() - start_stret.address()) + ASSERT((last_stret.address() - start_stret.address()) % SLOT_SIZE == 0); # endif #endif @@ -178,8 +188,7 @@ public: uintptr_t textSegment() { return get()->textSegment; } uintptr_t textSegmentSize() { return get()->textSegmentSize; } - // See comments below about PAGE_SIZE and PAGE_MAX_SIZE. - uintptr_t dataSize() { return PAGE_MAX_SIZE; } + uintptr_t dataSize() { return TRAMPOLINE_PAGE_SIZE; } uintptr_t impl() { return get()->impl.address(); } uintptr_t start() { return get()->start.address(); } @@ -202,11 +211,13 @@ typedef enum { // We must take care with our data layout on architectures that support // multiple page sizes. // -// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE. -// On some platforms this requires additional linker flags. +// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE, +// except on i386 which is a weird special case that uses PAGE_MIN_SIZE. +// The TRAMPOLINE_PAGE_SIZE macro handles this difference. On some platforms, +// aligning to PAGE_MAX_SIZE requires additional linker flags. // -// When we allocate a page group, we use PAGE_MAX_SIZE size. -// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE. +// When we allocate a page group, we use TRAMPOLINE_PAGE_SIZE size. +// This allows trampoline code to find its data by subtracting TRAMPOLINE_PAGE_SIZE. // // When we allocate a page group, we use the process's page alignment. // This simplifies allocation because we don't need to force greater than @@ -231,14 +242,14 @@ struct TrampolineBlockPageGroup // Payload data: block pointers and free list. // Bytes parallel with trampoline header code are the fields above or unused - // uint8_t payloads[PAGE_MAX_SIZE - sizeof(TrampolineBlockPageGroup)] + // uint8_t payloads[TRAMPOLINE_PAGE_SIZE - sizeof(TrampolineBlockPageGroup)] // Code: Mach-O header, then trampoline header followed by trampolines. 
// On platforms with struct return we have non-stret trampolines and // stret trampolines. The stret and non-stret trampolines at a given // index share the same data page. - // uint8_t macho[PAGE_MAX_SIZE]; - // uint8_t trampolines[ArgumentModeCount][PAGE_MAX_SIZE]; + // uint8_t macho[TRAMPOLINE_PAGE_SIZE]; + // uint8_t trampolines[ArgumentModeCount][TRAMPOLINE_PAGE_SIZE]; // Per-trampoline block data format: // initial value is 0 while page data is filled sequentially @@ -280,7 +291,7 @@ struct TrampolineBlockPageGroup // Skip over the data area, one page of Mach-O headers, // and one text page for each mode before this one. return (uintptr_t)this + Trampolines.dataSize() + - PAGE_MAX_SIZE * (1 + aMode); + TRAMPOLINE_PAGE_SIZE * (1 + aMode); } IMP trampoline(int aMode, uintptr_t index) { diff --git a/runtime/objc-blocktramps-i386.S b/runtime/objc-blocktramps-i386.S old mode 100644 new mode 100755 index d4f1eb8..cd7c9ce --- a/runtime/objc-blocktramps-i386.S +++ b/runtime/objc-blocktramps-i386.S @@ -30,13 +30,13 @@ .globl __objc_blockTrampolineStart .globl __objc_blockTrampolineLast -.align PAGE_SHIFT +.align 12 /* PAGE_SHIFT */ __objc_blockTrampolineImpl: movl (%esp), %eax // return address pushed by trampoline // 4(%esp) is return address pushed by the call site movl 8(%esp), %ecx // self -> ecx movl %ecx, 12(%esp) // ecx -> _cmd - movl -2*PAGE_SIZE-5(%eax), %ecx // block object pointer -> ecx + movl -2*4096/*PAGE_SIZE */-5(%eax), %ecx // block object pointer -> ecx // trampoline is -5 bytes from the return address // data is -2 pages from the trampoline movl %ecx, 8(%esp) // ecx -> self @@ -567,14 +567,14 @@ __objc_blockTrampolineLast: .globl __objc_blockTrampolineStart_stret .globl __objc_blockTrampolineLast_stret -.align PAGE_SHIFT +.align 12 /* PAGE_SHIFT */ __objc_blockTrampolineImpl_stret: movl (%esp), %eax // return address pushed by trampoline // 4(%esp) is return address pushed by the call site // 8(%esp) is struct-return address movl 12(%esp), %ecx // 
self -> ecx movl %ecx, 16(%esp) // ecx -> _cmd - movl -3*PAGE_SIZE-5(%eax), %ecx // block object pointer -> ecx + movl -3*4096/*PAGE_SIZE*/-5(%eax), %ecx // block object pointer -> ecx // trampoline is -5 bytes from the return address // data is -3 pages from the trampoline movl %ecx, 12(%esp) // ecx -> self diff --git a/runtime/objc-blocktramps-x86_64.S b/runtime/objc-blocktramps-x86_64.S old mode 100644 new mode 100755 index 5f377f0..618e0f1 --- a/runtime/objc-blocktramps-x86_64.S +++ b/runtime/objc-blocktramps-x86_64.S @@ -30,22 +30,37 @@ .globl __objc_blockTrampolineStart .globl __objc_blockTrampolineLast -.align PAGE_SHIFT +.align PAGE_MAX_SHIFT __objc_blockTrampolineImpl: movq (%rsp), %r10 // read return address pushed by TrampolineEntry's callq movq %rdi, %rsi // arg1 -> arg2 - movq -2*PAGE_SIZE-5(%r10), %rdi // block object pointer -> arg1 + movq -2*PAGE_MAX_SIZE-5(%r10), %rdi // block object pointer -> arg1 // trampoline is -5 bytes from the return address // data is -2 pages from the trampoline ret // back to TrampolineEntry to preserve CPU's return stack -.macro TrampolineEntry +.macro TrampolineEntry1 // This trampoline is 8 bytes long. // This callq is 5 bytes long. callq __objc_blockTrampolineImpl jmp *16(%rdi) .endmacro +.macro TrampolineEntry4 + TrampolineEntry1 + TrampolineEntry1 + TrampolineEntry1 + TrampolineEntry1 +.endmacro + +#if PAGE_MAX_SHIFT == 12 +#define TrampolineEntry TrampolineEntry1 +#elif PAGE_MAX_SHIFT == 14 +#define TrampolineEntry TrampolineEntry4 +#else +#error "unknown PAGE_MAX_SHIFT value" +#endif + .align 5 __objc_blockTrampolineStart: TrampolineEntry @@ -555,8 +570,26 @@ __objc_blockTrampolineStart: TrampolineEntry TrampolineEntry TrampolineEntry + +// The above is 507 entries. +#if PAGE_MAX_SHIFT == 14 +// With 16kB pages, we need (4096*4-32)/8 = 2044 single entries, or +// 511 "quad" entries as above. We need 3 more regular entries, then +// 3 more singular entries, and finally a singular entry labeled Last. 
+ TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry1 + TrampolineEntry1 + TrampolineEntry1 +__objc_blockTrampolineLast: + TrampolineEntry1 +#else +// With 4kB pages, we need (4096-32)/8 = 508 entries. We have one +// more at the end with the Last label for a total of 508. __objc_blockTrampolineLast: TrampolineEntry +#endif .text @@ -564,24 +597,39 @@ __objc_blockTrampolineLast: .globl __objc_blockTrampolineStart_stret .globl __objc_blockTrampolineLast_stret -.align PAGE_SHIFT +.align PAGE_MAX_SHIFT __objc_blockTrampolineImpl_stret: // %rdi -- arg1 -- is address of return value's space. Don't mess with it. movq (%rsp), %r10 // read return address pushed by TrampolineEntry's callq movq %rsi, %rdx // arg2 -> arg3 - movq -3*PAGE_SIZE-5(%r10), %rsi // block object pointer -> arg2 + movq -3*PAGE_MAX_SIZE-5(%r10), %rsi // block object pointer -> arg2 // trampoline is -5 bytes from the return address // data is -3 pages from the trampoline ret // back to TrampolineEntry to preserve CPU's return stack -.macro TrampolineEntry_stret +.macro TrampolineEntry_stret1 // This trampoline is 8 bytes long. // This callq is 5 bytes long. callq __objc_blockTrampolineImpl_stret jmp *16(%rsi) .endmacro +.macro TrampolineEntry_stret4 + TrampolineEntry_stret1 + TrampolineEntry_stret1 + TrampolineEntry_stret1 + TrampolineEntry_stret1 +.endmacro + +#if PAGE_MAX_SHIFT == 12 +#define TrampolineEntry_stret TrampolineEntry_stret1 +#elif PAGE_MAX_SHIFT == 14 +#define TrampolineEntry_stret TrampolineEntry_stret4 +#else +#error "unknown PAGE_MAX_SHIFT value" +#endif + .align 5 __objc_blockTrampolineStart_stret: TrampolineEntry_stret @@ -1091,7 +1139,21 @@ __objc_blockTrampolineStart_stret: TrampolineEntry_stret TrampolineEntry_stret TrampolineEntry_stret + +// See the comment on non-stret's Last for why we have additional +// entries here. 
+#if PAGE_MAX_SHIFT == 14 + TrampolineEntry_stret + TrampolineEntry_stret + TrampolineEntry_stret + TrampolineEntry_stret1 + TrampolineEntry_stret1 + TrampolineEntry_stret1 +__objc_blockTrampolineLast_stret: + TrampolineEntry_stret1 +#else __objc_blockTrampolineLast_stret: TrampolineEntry_stret +#endif #endif diff --git a/runtime/objc-cache-old.mm b/runtime/objc-cache-old.mm index fed884a..50fbab0 100644 --- a/runtime/objc-cache-old.mm +++ b/runtime/objc-cache-old.mm @@ -1795,9 +1795,5 @@ void _class_printMethodCacheStatistics(void) #endif -void cache_init() -{ -} - // !__OBJC2__ #endif diff --git a/runtime/objc-cache.h b/runtime/objc-cache.h deleted file mode 100644 index e0448e7..0000000 --- a/runtime/objc-cache.h +++ /dev/null @@ -1,23 +0,0 @@ - -#ifndef _OBJC_CACHE_H -#define _OBJC_CACHE_H - -#include "objc-private.h" - -__BEGIN_DECLS - -extern void cache_init(void); - -extern IMP cache_getImp(Class cls, SEL sel); - -extern void cache_fill(Class cls, SEL sel, IMP imp, id receiver); - -extern void cache_erase_nolock(Class cls); - -extern void cache_delete(Class cls); - -extern void cache_collect(bool collectALot); - -__END_DECLS - -#endif diff --git a/runtime/objc-cache.mm b/runtime/objc-cache.mm index 4e6ca11..213d147 100644 --- a/runtime/objc-cache.mm +++ b/runtime/objc-cache.mm @@ -63,14 +63,12 @@ * objc_msgSend* * cache_getImp * - * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked) - * cache_fill (acquires lock) - * cache_expand (only called from cache_fill) - * cache_create (only called from cache_expand) - * bcopy (only called from instrumented cache_expand) - * flush_caches (acquires lock) - * cache_flush (only called from cache_fill and flush_caches) - * cache_collect_free (only called from cache_expand and cache_flush) + * Cache readers/writers (hold cacheUpdateLock during access; not PC-checked) + * cache_t::copyCacheNolock (caller must hold the lock) + * cache_t::eraseNolock (caller must hold the lock) + * 
cache_t::collectNolock (caller must hold the lock) + * cache_t::insert (acquires lock) + * cache_t::destroy (acquires lock) * * UNPROTECTED cache readers (NOT thread-safe; used for debug info only) * cache_print @@ -84,21 +82,96 @@ #if __OBJC2__ #include "objc-private.h" -#include "objc-cache.h" +#if TARGET_OS_OSX +#include +#include +#endif + +#if __arm__ || __x86_64__ || __i386__ + +// objc_msgSend has few registers available. +// Cache scan increments and wraps at special end-marking bucket. +#define CACHE_END_MARKER 1 + +// Historical fill ratio of 75% (since the new objc runtime was introduced). +static inline mask_t cache_fill_ratio(mask_t capacity) { + return capacity * 3 / 4; +} + +#elif __arm64__ && !__LP64__ + +// objc_msgSend has lots of registers available. +// Cache scan decrements. No end marker needed. +#define CACHE_END_MARKER 0 + +// Historical fill ratio of 75% (since the new objc runtime was introduced). +static inline mask_t cache_fill_ratio(mask_t capacity) { + return capacity * 3 / 4; +} + +#elif __arm64__ && __LP64__ + +// objc_msgSend has lots of registers available. +// Cache scan decrements. No end marker needed. +#define CACHE_END_MARKER 0 + +// Allow 87.5% fill ratio in the fast path for all cache sizes. +// Increasing the cache fill ratio reduces the fragmentation and wasted space +// in imp-caches at the cost of potentially increasing the average lookup of +// a selector in imp-caches by increasing collision chains. Another potential +// change is that cache table resizes / resets happen at different moments. +static inline mask_t cache_fill_ratio(mask_t capacity) { + return capacity * 7 / 8; +} + +// Allow 100% cache utilization for smaller cache sizes. This has the same +// advantages and disadvantages as the fill ratio. A very large percentage +// of caches end up with very few entries and the worst case of collision +// chains in small tables is relatively small. 
+// NOTE: objc_msgSend properly handles a cache lookup with a full cache. +#define CACHE_ALLOW_FULL_UTILIZATION 1 + +#else +#error unknown architecture +#endif /* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */ enum { +#if CACHE_END_MARKER || (__arm64__ && !__LP64__) + // When we have a cache end marker it fills a bucket slot, so having a + // initial cache size of 2 buckets would not be efficient when one of the + // slots is always filled with the end marker. So start with a cache size + // 4 buckets. INIT_CACHE_SIZE_LOG2 = 2, +#else + // Allow an initial bucket size of 2 buckets, since a large number of + // classes, especially metaclasses, have very few imps, and we support + // the ability to fill 100% of the cache before resizing. + INIT_CACHE_SIZE_LOG2 = 1, +#endif INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2), MAX_CACHE_SIZE_LOG2 = 16, MAX_CACHE_SIZE = (1 << MAX_CACHE_SIZE_LOG2), + FULL_UTILIZATION_CACHE_SIZE_LOG2 = 3, + FULL_UTILIZATION_CACHE_SIZE = (1 << FULL_UTILIZATION_CACHE_SIZE_LOG2), }; -static void cache_collect_free(struct bucket_t *data, mask_t capacity); static int _collecting_in_critical(void); static void _garbage_make_room(void); +#if DEBUG_TASK_THREADS +static kern_return_t objc_task_threads +( + task_t target_task, + thread_act_array_t *act_list, + mach_msg_type_number_t *act_listCnt +); +#endif + +#if DEBUG_TASK_THREADS +#undef HAVE_TASK_RESTARTABLE_RANGES +#endif /*********************************************************************** * Cache statistics for OBJC_PRINT_CACHE_SETUP @@ -159,25 +232,21 @@ asm("\n .section __TEXT,__const" #endif ); +#if CONFIG_USE_PREOPT_CACHES +__attribute__((used, section("__DATA_CONST,__objc_scoffs"))) +uintptr_t objc_opt_offsets[__OBJC_OPT_OFFSETS_COUNT]; +#endif -#if __arm__ || __x86_64__ || __i386__ -// objc_msgSend has few registers available. -// Cache scan increments and wraps at special end-marking bucket. 
-#define CACHE_END_MARKER 1 +#if CACHE_END_MARKER static inline mask_t cache_next(mask_t i, mask_t mask) { return (i+1) & mask; } - #elif __arm64__ -// objc_msgSend has lots of registers available. -// Cache scan decrements. No end marker needed. -#define CACHE_END_MARKER 0 static inline mask_t cache_next(mask_t i, mask_t mask) { return i ? i-1 : mask; } - #else -#error unknown architecture +#error unexpected configuration #endif @@ -237,29 +306,27 @@ ldp(uintptr_t& onep, uintptr_t& twop, const void *srcp) static inline mask_t cache_hash(SEL sel, mask_t mask) { - return (mask_t)(uintptr_t)sel & mask; -} - -cache_t *getCache(Class cls) -{ - ASSERT(cls); - return &cls->cache; + uintptr_t value = (uintptr_t)sel; +#if CONFIG_USE_PREOPT_CACHES + value ^= value >> 7; +#endif + return (mask_t)(value & mask); } #if __arm64__ template -void bucket_t::set(SEL newSel, IMP newImp, Class cls) +void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls) { - ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 || - _sel.load(memory_order::memory_order_relaxed) == newSel); + ASSERT(_sel.load(memory_order_relaxed) == 0 || + _sel.load(memory_order_relaxed) == newSel); static_assert(offsetof(bucket_t,_imp) == 0 && offsetof(bucket_t,_sel) == sizeof(void *), "bucket_t layout doesn't match arm64 bucket_t::set()"); uintptr_t encodedImp = (impEncoding == Encoded - ? encodeImp(newImp, newSel, cls) + ? 
encodeImp(base, newImp, newSel, cls) : (uintptr_t)newImp); // LDP/STP guarantees that all observers get @@ -270,10 +337,10 @@ void bucket_t::set(SEL newSel, IMP newImp, Class cls) #else template -void bucket_t::set(SEL newSel, IMP newImp, Class cls) +void bucket_t::set(bucket_t *base, SEL newSel, IMP newImp, Class cls) { - ASSERT(_sel.load(memory_order::memory_order_relaxed) == 0 || - _sel.load(memory_order::memory_order_relaxed) == newSel); + ASSERT(_sel.load(memory_order_relaxed) == 0 || + _sel.load(memory_order_relaxed) == newSel); // objc_msgSend uses sel and imp with no locks. // It is safe for objc_msgSend to see new imp but NULL sel @@ -282,30 +349,196 @@ void bucket_t::set(SEL newSel, IMP newImp, Class cls) // Therefore we write new imp, wait a lot, then write new sel. uintptr_t newIMP = (impEncoding == Encoded - ? encodeImp(newImp, newSel, cls) + ? encodeImp(base, newImp, newSel, cls) : (uintptr_t)newImp); if (atomicity == Atomic) { - _imp.store(newIMP, memory_order::memory_order_relaxed); + _imp.store(newIMP, memory_order_relaxed); - if (_sel.load(memory_order::memory_order_relaxed) != newSel) { + if (_sel.load(memory_order_relaxed) != newSel) { #ifdef __arm__ mega_barrier(); - _sel.store(newSel, memory_order::memory_order_relaxed); + _sel.store(newSel, memory_order_relaxed); #elif __x86_64__ || __i386__ - _sel.store(newSel, memory_order::memory_order_release); + _sel.store(newSel, memory_order_release); #else #error Don't know how to do bucket_t::set on this architecture. 
#endif } } else { - _imp.store(newIMP, memory_order::memory_order_relaxed); - _sel.store(newSel, memory_order::memory_order_relaxed); + _imp.store(newIMP, memory_order_relaxed); + _sel.store(newSel, memory_order_relaxed); } } #endif +void cache_t::initializeToEmpty() +{ + _bucketsAndMaybeMask.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed); + _originalPreoptCache.store(nullptr, std::memory_order_relaxed); +} + +#if CONFIG_USE_PREOPT_CACHES +/* + * The shared cache builder will sometimes have prebuilt an IMP cache + * for the class and left a `preopt_cache_t` pointer in _originalPreoptCache. + * + * However we have this tension: + * - when the class is realized it has to have a cache that can't resolve any + * selector until the class is properly initialized so that every + * caller falls in the slowpath and synchronizes with the class initializing, + * - we need to remember that cache pointer and we have no space for that. + * + * The caches are designed so that preopt_cache::bit_one is set to 1, + * so we "disguise" the pointer so that it looks like a cache of capacity 1 + * where that bit one aliases with where the top bit of a SEL in the bucket_t + * would live: + * + * +----------------+----------------+ + * | IMP | SEL | << a bucket_t + * +----------------+----------------+--------------... + * preopt_cache_t >>| 1| ... + * +----------------+--------------... + * + * The shared cache guarantees that there's valid memory to read under "IMP" + * + * This lets us encode the original preoptimized cache pointer during + * initialization, and we can reconstruct its original address and install + * it back later. 
+ */ +void cache_t::initializeToPreoptCacheInDisguise(const preopt_cache_t *cache) +{ + // preopt_cache_t::bit_one is 1 which sets the top bit + // and is never set on any valid selector + + uintptr_t value = (uintptr_t)cache + sizeof(preopt_cache_t) - + (bucket_t::offsetOfSel() + sizeof(SEL)); + + _originalPreoptCache.store(nullptr, std::memory_order_relaxed); + setBucketsAndMask((bucket_t *)value, 0); + _occupied = cache->occupied; +} + +void cache_t::maybeConvertToPreoptimized() +{ + const preopt_cache_t *cache = disguised_preopt_cache(); + + if (cache == nil) { + return; + } + + if (!cls()->allowsPreoptCaches() || + (cache->has_inlines && !cls()->allowsPreoptInlinedSels())) { + if (PrintCaches) { + _objc_inform("CACHES: %sclass %s: dropping cache (from %s)", + cls()->isMetaClass() ? "meta" : "", + cls()->nameForLogging(), "setInitialized"); + } + return setBucketsAndMask(emptyBuckets(), 0); + } + + uintptr_t value = (uintptr_t)&cache->entries; +#if __has_feature(ptrauth_calls) + value = (uintptr_t)ptrauth_sign_unauthenticated((void *)value, + ptrauth_key_process_dependent_data, (uintptr_t)cls()); +#endif + value |= preoptBucketsHashParams(cache) | preoptBucketsMarker; + _bucketsAndMaybeMask.store(value, memory_order_relaxed); + _occupied = cache->occupied; +} + +void cache_t::initializeToEmptyOrPreoptimizedInDisguise() +{ + if (os_fastpath(!DisablePreoptCaches)) { + if (!objc::dataSegmentsRanges.inSharedCache((uintptr_t)this)) { + if (dyld_shared_cache_some_image_overridden()) { + // If the system has roots, then we must disable preoptimized + // caches completely. If a class in another image has a + // superclass in the root, the offset to the superclass will + // be wrong. 
rdar://problem/61601961 + cls()->setDisallowPreoptCachesRecursively("roots"); + } + return initializeToEmpty(); + } + + auto cache = _originalPreoptCache.load(memory_order_relaxed); + if (cache) { + return initializeToPreoptCacheInDisguise(cache); + } + } + + return initializeToEmpty(); +} + +const preopt_cache_t *cache_t::preopt_cache() const +{ + auto addr = _bucketsAndMaybeMask.load(memory_order_relaxed); + addr &= preoptBucketsMask; +#if __has_feature(ptrauth_calls) +#if __BUILDING_OBJCDT__ + addr = (uintptr_t)ptrauth_strip((preopt_cache_entry_t *)addr, + ptrauth_key_process_dependent_data); +#else + addr = (uintptr_t)ptrauth_auth_data((preopt_cache_entry_t *)addr, + ptrauth_key_process_dependent_data, (uintptr_t)cls()); +#endif +#endif + return (preopt_cache_t *)(addr - sizeof(preopt_cache_t)); +} + +const preopt_cache_t *cache_t::disguised_preopt_cache() const +{ + bucket_t *b = buckets(); + if ((intptr_t)b->sel() >= 0) return nil; + + uintptr_t value = (uintptr_t)b + bucket_t::offsetOfSel() + sizeof(SEL); + return (preopt_cache_t *)(value - sizeof(preopt_cache_t)); +} + +Class cache_t::preoptFallbackClass() const +{ + return (Class)((uintptr_t)cls() + preopt_cache()->fallback_class_offset); +} + +bool cache_t::isConstantOptimizedCache(bool strict, uintptr_t empty_addr) const +{ + uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed); + if (addr & preoptBucketsMarker) { + return true; + } + if (strict) { + return false; + } + return mask() == 0 && addr != empty_addr; +} + +bool cache_t::shouldFlush(SEL sel, IMP imp) const +{ + // This test isn't backwards: disguised caches aren't "strict" + // constant optimized caches + if (!isConstantOptimizedCache(/*strict*/true)) { + const preopt_cache_t *cache = disguised_preopt_cache(); + if (cache) { + uintptr_t offs = (uintptr_t)sel - (uintptr_t)@selector(🤯); + uintptr_t slot = ((offs >> cache->shift) & cache->mask); + auto &entry = cache->entries[slot]; + + return entry.sel_offs == offs && + 
(uintptr_t)cls() - entry.imp_offs == + (uintptr_t)ptrauth_strip(imp, ptrauth_key_function_pointer); + } + } + + return cache_getImp(cls(), sel) == imp; +} + +bool cache_t::isConstantOptimizedCacheWithInlinedSels() const +{ + return isConstantOptimizedCache(/* strict */true) && preopt_cache()->has_inlines; +} +#endif // CONFIG_USE_PREOPT_CACHES + #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) @@ -321,135 +554,85 @@ void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) // ensure other threads see buckets contents before buckets pointer mega_barrier(); - _buckets.store(newBuckets, memory_order::memory_order_relaxed); - + _bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_relaxed); + // ensure other threads see new buckets before new mask mega_barrier(); - - _mask.store(newMask, memory_order::memory_order_relaxed); + + _maybeMask.store(newMask, memory_order_relaxed); _occupied = 0; #elif __x86_64__ || i386 // ensure other threads see buckets contents before buckets pointer - _buckets.store(newBuckets, memory_order::memory_order_release); - + _bucketsAndMaybeMask.store((uintptr_t)newBuckets, memory_order_release); + // ensure other threads see new buckets before new mask - _mask.store(newMask, memory_order::memory_order_release); + _maybeMask.store(newMask, memory_order_release); _occupied = 0; #else #error Don't know how to do setBucketsAndMask on this architecture. 
#endif } -struct bucket_t *cache_t::emptyBuckets() +mask_t cache_t::mask() const { - return (bucket_t *)&_objc_empty_cache; + return _maybeMask.load(memory_order_relaxed); } -struct bucket_t *cache_t::buckets() -{ - return _buckets.load(memory_order::memory_order_relaxed); -} - -mask_t cache_t::mask() -{ - return _mask.load(memory_order::memory_order_relaxed); -} - -void cache_t::initializeToEmpty() -{ - bzero(this, sizeof(*this)); - _buckets.store((bucket_t *)&_objc_empty_cache, memory_order::memory_order_relaxed); -} - -#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 +#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) { uintptr_t buckets = (uintptr_t)newBuckets; uintptr_t mask = (uintptr_t)newMask; - + ASSERT(buckets <= bucketsMask); ASSERT(mask <= maxMask); - - _maskAndBuckets.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, std::memory_order_relaxed); + + _bucketsAndMaybeMask.store(((uintptr_t)newMask << maskShift) | (uintptr_t)newBuckets, memory_order_relaxed); _occupied = 0; } -struct bucket_t *cache_t::emptyBuckets() +mask_t cache_t::mask() const { - return (bucket_t *)&_objc_empty_cache; -} - -struct bucket_t *cache_t::buckets() -{ - uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed); - return (bucket_t *)(maskAndBuckets & bucketsMask); -} - -mask_t cache_t::mask() -{ - uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed); + uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed); return maskAndBuckets >> maskShift; } -void cache_t::initializeToEmpty() -{ - bzero(this, sizeof(*this)); - _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed); -} - #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4 void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) { uintptr_t 
buckets = (uintptr_t)newBuckets; unsigned mask = (unsigned)newMask; - + ASSERT(buckets == (buckets & bucketsMask)); ASSERT(mask <= 0xffff); - - // The shift amount is equal to the number of leading zeroes in - // the last 16 bits of mask. Count all the leading zeroes, then - // subtract to ignore the top half. - uintptr_t maskShift = __builtin_clz(mask) - (sizeof(mask) * CHAR_BIT - 16); - ASSERT(mask == (0xffff >> maskShift)); - - _maskAndBuckets.store(buckets | maskShift, memory_order::memory_order_relaxed); + + _bucketsAndMaybeMask.store(buckets | objc::mask16ShiftBits(mask), memory_order_relaxed); _occupied = 0; - + ASSERT(this->buckets() == newBuckets); ASSERT(this->mask() == newMask); } -struct bucket_t *cache_t::emptyBuckets() +mask_t cache_t::mask() const { - return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask); -} - -struct bucket_t *cache_t::buckets() -{ - uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed); - return (bucket_t *)(maskAndBuckets & bucketsMask); -} - -mask_t cache_t::mask() -{ - uintptr_t maskAndBuckets = _maskAndBuckets.load(memory_order::memory_order_relaxed); + uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed); uintptr_t maskShift = (maskAndBuckets & maskMask); return 0xffff >> maskShift; } -void cache_t::initializeToEmpty() -{ - bzero(this, sizeof(*this)); - _maskAndBuckets.store((uintptr_t)&_objc_empty_cache, std::memory_order_relaxed); -} - #else #error Unknown cache mask storage type. #endif -mask_t cache_t::occupied() +struct bucket_t *cache_t::buckets() const +{ + uintptr_t addr = _bucketsAndMaybeMask.load(memory_order_relaxed); + return (bucket_t *)(addr & bucketsMask); +} + +mask_t cache_t::occupied() const { return _occupied; } @@ -459,11 +642,15 @@ void cache_t::incrementOccupied() _occupied++; } -unsigned cache_t::capacity() +unsigned cache_t::capacity() const { return mask() ? 
mask()+1 : 0; } +Class cache_t::cls() const +{ + return (Class)((uintptr_t)this - offsetof(objc_class, cache)); +} size_t cache_t::bytesForCapacity(uint32_t cap) { @@ -477,22 +664,21 @@ bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap) return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1; } -bucket_t *allocateBuckets(mask_t newCapacity) +bucket_t *cache_t::allocateBuckets(mask_t newCapacity) { // Allocate one extra bucket to mark the end of the list. // This can't overflow mask_t because newCapacity is a power of 2. - bucket_t *newBuckets = (bucket_t *) - calloc(cache_t::bytesForCapacity(newCapacity), 1); + bucket_t *newBuckets = (bucket_t *)calloc(bytesForCapacity(newCapacity), 1); - bucket_t *end = cache_t::endMarker(newBuckets, newCapacity); + bucket_t *end = endMarker(newBuckets, newCapacity); #if __arm__ // End marker's sel is 1 and imp points BEFORE the first bucket. // This saves an instruction in objc_msgSend. - end->set((SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil); + end->set(newBuckets, (SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil); #else // End marker's sel is 1 and imp points to the first bucket. 
- end->set((SEL)(uintptr_t)1, (IMP)newBuckets, nil); + end->set(newBuckets, (SEL)(uintptr_t)1, (IMP)newBuckets, nil); #endif if (PrintCaches) recordNewCache(newCapacity); @@ -502,17 +688,21 @@ bucket_t *allocateBuckets(mask_t newCapacity) #else -bucket_t *allocateBuckets(mask_t newCapacity) +bucket_t *cache_t::allocateBuckets(mask_t newCapacity) { if (PrintCaches) recordNewCache(newCapacity); - return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1); + return (bucket_t *)calloc(bytesForCapacity(newCapacity), 1); } #endif +struct bucket_t *cache_t::emptyBuckets() +{ + return (bucket_t *)((uintptr_t)&_objc_empty_cache & bucketsMask); +} -bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true) +bucket_t *cache_t::emptyBucketsForCapacity(mask_t capacity, bool allocate) { #if CONFIG_USE_CACHE_LOCK cacheUpdateLock.assertLocked(); @@ -520,11 +710,11 @@ bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true) runtimeLock.assertLocked(); #endif - size_t bytes = cache_t::bytesForCapacity(capacity); + size_t bytes = bytesForCapacity(capacity); // Use _objc_empty_cache if the buckets is small enough. if (bytes <= EMPTY_BYTES) { - return cache_t::emptyBuckets(); + return emptyBuckets(); } // Use shared empty buckets allocated on the heap. 
@@ -556,17 +746,16 @@ bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true) return emptyBucketsList[index]; } - -bool cache_t::isConstantEmptyCache() +bool cache_t::isConstantEmptyCache() const { - return - occupied() == 0 && + return + occupied() == 0 && buckets() == emptyBucketsForCapacity(capacity(), false); } -bool cache_t::canBeFreed() +bool cache_t::canBeFreed() const { - return !isConstantEmptyCache(); + return !isConstantEmptyCache() && !isConstantOptimizedCache(); } ALWAYS_INLINE @@ -585,68 +774,79 @@ void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld) setBucketsAndMask(newBuckets, newCapacity - 1); if (freeOld) { - cache_collect_free(oldBuckets, oldCapacity); + collect_free(oldBuckets, oldCapacity); } } -void cache_t::bad_cache(id receiver, SEL sel, Class isa) +void cache_t::bad_cache(id receiver, SEL sel) { // Log in separate steps in case the logging itself causes a crash. _objc_inform_now_and_on_crash ("Method cache corrupted. This may be a message to an " "invalid object, or a memory error somewhere else."); - cache_t *cache = &isa->cache; #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED - bucket_t *buckets = cache->_buckets.load(memory_order::memory_order_relaxed); + bucket_t *b = buckets(); _objc_inform_now_and_on_crash ("%s %p, SEL %p, isa %p, cache %p, buckets %p, " "mask 0x%x, occupied 0x%x", receiver ? "receiver" : "unused", receiver, - sel, isa, cache, buckets, - cache->_mask.load(memory_order::memory_order_relaxed), - cache->_occupied); + sel, cls(), this, b, + _maybeMask.load(memory_order_relaxed), + _occupied); _objc_inform_now_and_on_crash ("%s %zu bytes, buckets %zu bytes", receiver ? 
"receiver" : "unused", malloc_size(receiver), - malloc_size(buckets)); + malloc_size(b)); #elif (CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 || \ + CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS || \ CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4) - uintptr_t maskAndBuckets = cache->_maskAndBuckets.load(memory_order::memory_order_relaxed); + uintptr_t maskAndBuckets = _bucketsAndMaybeMask.load(memory_order_relaxed); _objc_inform_now_and_on_crash ("%s %p, SEL %p, isa %p, cache %p, buckets and mask 0x%lx, " "occupied 0x%x", receiver ? "receiver" : "unused", receiver, - sel, isa, cache, maskAndBuckets, - cache->_occupied); + sel, cls(), this, maskAndBuckets, _occupied); _objc_inform_now_and_on_crash ("%s %zu bytes, buckets %zu bytes", receiver ? "receiver" : "unused", malloc_size(receiver), - malloc_size(cache->buckets())); + malloc_size(buckets())); #else #error Unknown cache mask storage type. #endif _objc_inform_now_and_on_crash ("selector '%s'", sel_getName(sel)); _objc_inform_now_and_on_crash - ("isa '%s'", isa->nameForLogging()); + ("isa '%s'", cls()->nameForLogging()); _objc_fatal ("Method cache corrupted. 
This may be a message to an " "invalid object, or a memory error somewhere else."); } -ALWAYS_INLINE -void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver) +void cache_t::insert(SEL sel, IMP imp, id receiver) { -#if CONFIG_USE_CACHE_LOCK - cacheUpdateLock.assertLocked(); -#else runtimeLock.assertLocked(); + + // Never cache before +initialize is done + if (slowpath(!cls()->isInitialized())) { + return; + } + + if (isConstantOptimizedCache()) { + _objc_fatal("cache_t::insert() called with a preoptimized cache for %s", + cls()->nameForLogging()); + } + +#if DEBUG_TASK_THREADS + return _collecting_in_critical(); +#else +#if CONFIG_USE_CACHE_LOCK + mutex_locker_t lock(cacheUpdateLock); #endif - ASSERT(sel != 0 && cls->isInitialized()); + ASSERT(sel != 0 && cls()->isInitialized()); - // Use the cache as-is if it is less than 3/4 full + // Use the cache as-is if until we exceed our expected fill ratio. mask_t newOccupied = occupied() + 1; unsigned oldCapacity = capacity(), capacity = oldCapacity; if (slowpath(isConstantEmptyCache())) { @@ -654,9 +854,14 @@ void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver) if (!capacity) capacity = INIT_CACHE_SIZE; reallocate(oldCapacity, capacity, /* freeOld */false); } - else if (fastpath(newOccupied + CACHE_END_MARKER <= capacity / 4 * 3)) { - // Cache is less than 3/4 full. Use it as-is. + else if (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) { + // Cache is less than 3/4 or 7/8 full. Use it as-is. } +#if CACHE_ALLOW_FULL_UTILIZATION + else if (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) { + // Allow 100% cache utilization for small buckets. Use it as-is. + } +#endif else { capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE; if (capacity > MAX_CACHE_SIZE) { @@ -671,12 +876,11 @@ void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver) mask_t i = begin; // Scan for the first unused slot and insert there. 
- // There is guaranteed to be an empty slot because the - // minimum size is 4 and we resized at 3/4 full. + // There is guaranteed to be an empty slot. do { if (fastpath(b[i].sel() == 0)) { incrementOccupied(); - b[i].set(sel, imp, cls); + b[i].set(b, sel, imp, cls()); return; } if (b[i].sel() == sel) { @@ -686,31 +890,54 @@ void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver) } } while (fastpath((i = cache_next(i, m)) != begin)); - cache_t::bad_cache(receiver, (SEL)sel, cls); + bad_cache(receiver, (SEL)sel); +#endif // !DEBUG_TASK_THREADS } -void cache_fill(Class cls, SEL sel, IMP imp, id receiver) +void cache_t::copyCacheNolock(objc_imp_cache_entry *buffer, int len) { - runtimeLock.assertLocked(); - -#if !DEBUG_TASK_THREADS - // Never cache before +initialize is done - if (cls->isInitialized()) { - cache_t *cache = getCache(cls); #if CONFIG_USE_CACHE_LOCK - mutex_locker_t lock(cacheUpdateLock); -#endif - cache->insert(cls, sel, imp, receiver); - } + cacheUpdateLock.assertLocked(); #else - _collecting_in_critical(); + runtimeLock.assertLocked(); #endif -} + int wpos = 0; +#if CONFIG_USE_PREOPT_CACHES + if (isConstantOptimizedCache()) { + auto cache = preopt_cache(); + auto mask = cache->mask; + uintptr_t sel_base = objc_opt_offsets[OBJC_OPT_METHODNAME_START]; + uintptr_t imp_base = (uintptr_t)&cache->entries; + + for (uintptr_t index = 0; index <= mask && wpos < len; index++) { + auto &ent = cache->entries[index]; + if (~ent.sel_offs) { + buffer[wpos].sel = (SEL)(sel_base + ent.sel_offs); + buffer[wpos].imp = (IMP)(imp_base - ent.imp_offs); + wpos++; + } + } + return; + } +#endif + { + bucket_t *buckets = this->buckets(); + uintptr_t count = capacity(); + + for (uintptr_t index = 0; index < count && wpos < len; index++) { + if (buckets[index].sel()) { + buffer[wpos].imp = buckets[index].imp(buckets, cls()); + buffer[wpos].sel = buckets[index].sel(); + wpos++; + } + } + } +} // Reset this entire cache to the uncached lookup by reallocating it. 
// This must not shrink the cache - that breaks the lock-free scheme. -void cache_erase_nolock(Class cls) +void cache_t::eraseNolock(const char *func) { #if CONFIG_USE_CACHE_LOCK cacheUpdateLock.assertLocked(); @@ -718,29 +945,36 @@ void cache_erase_nolock(Class cls) runtimeLock.assertLocked(); #endif - cache_t *cache = getCache(cls); - - mask_t capacity = cache->capacity(); - if (capacity > 0 && cache->occupied() > 0) { - auto oldBuckets = cache->buckets(); + if (isConstantOptimizedCache()) { + auto c = cls(); + if (PrintCaches) { + _objc_inform("CACHES: %sclass %s: dropping and disallowing preopt cache (from %s)", + c->isMetaClass() ? "meta" : "", + c->nameForLogging(), func); + } + setBucketsAndMask(emptyBuckets(), 0); + c->setDisallowPreoptCaches(); + } else if (occupied() > 0) { + auto capacity = this->capacity(); + auto oldBuckets = buckets(); auto buckets = emptyBucketsForCapacity(capacity); - cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied - cache_collect_free(oldBuckets, capacity); + setBucketsAndMask(buckets, capacity - 1); // also clears occupied + collect_free(oldBuckets, capacity); } } -void cache_delete(Class cls) +void cache_t::destroy() { #if CONFIG_USE_CACHE_LOCK mutex_locker_t lock(cacheUpdateLock); #else runtimeLock.assertLocked(); #endif - if (cls->cache.canBeFreed()) { - if (PrintCaches) recordDeadCache(cls->cache.capacity()); - free(cls->cache.buckets()); + if (canBeFreed()) { + if (PrintCaches) recordDeadCache(capacity()); + free(buckets()); } } @@ -817,7 +1051,7 @@ extern "C" task_restartable_range_t objc_restartableRanges[]; static bool shouldUseRestartableRanges = true; #endif -void cache_init() +void cache_t::init() { #if HAVE_TASK_RESTARTABLE_RANGES mach_msg_type_number_t count = 0; @@ -883,7 +1117,18 @@ static int _collecting_in_critical(void) continue; // Find out where thread is executing +#if TARGET_OS_OSX + if (oah_is_current_process_translated()) { + kern_return_t ret = objc_thread_get_rip(threads[count], 
(uint64_t*)&pc); + if (ret != KERN_SUCCESS) { + pc = PC_SENTINEL; + } + } else { + pc = _get_pc_for_thread (threads[count]); + } +#else pc = _get_pc_for_thread (threads[count]); +#endif // Check for bad status, and if so, assume the worse (can't collect) if (pc == PC_SENTINEL) @@ -968,13 +1213,13 @@ static void _garbage_make_room(void) /*********************************************************************** -* cache_collect_free. Add the specified malloc'd memory to the list +* cache_t::collect_free. Add the specified malloc'd memory to the list * of them to free at some later point. * size is used for the collection threshold. It does not have to be * precisely the block's size. * Cache locks: cacheUpdateLock must be held by the caller. **********************************************************************/ -static void cache_collect_free(bucket_t *data, mask_t capacity) +void cache_t::collect_free(bucket_t *data, mask_t capacity) { #if CONFIG_USE_CACHE_LOCK cacheUpdateLock.assertLocked(); @@ -987,7 +1232,7 @@ static void cache_collect_free(bucket_t *data, mask_t capacity) _garbage_make_room (); garbage_byte_size += cache_t::bytesForCapacity(capacity); garbage_refs[garbage_count++] = data; - cache_collect(false); + cache_t::collectNolock(false); } @@ -996,7 +1241,7 @@ static void cache_collect_free(bucket_t *data, mask_t capacity) * collectALot tries harder to free memory. * Cache locks: cacheUpdateLock must be held by the caller. 
**********************************************************************/ -void cache_collect(bool collectALot) +void cache_t::collectNolock(bool collectALot) { #if CONFIG_USE_CACHE_LOCK cacheUpdateLock.assertLocked(); @@ -1293,6 +1538,41 @@ static kern_return_t objc_task_threads // DEBUG_TASK_THREADS #endif +OBJC_EXPORT bucket_t * objc_cache_buckets(const cache_t * cache) { + return cache->buckets(); +} + +#if CONFIG_USE_PREOPT_CACHES + +OBJC_EXPORT const preopt_cache_t * _Nonnull objc_cache_preoptCache(const cache_t * _Nonnull cache) { + return cache->preopt_cache(); +} + +OBJC_EXPORT bool objc_cache_isConstantOptimizedCache(const cache_t * _Nonnull cache, bool strict, uintptr_t empty_addr) { + return cache->isConstantOptimizedCache(strict, empty_addr); +} + +OBJC_EXPORT unsigned objc_cache_preoptCapacity(const cache_t * _Nonnull cache) { + return cache->preopt_cache()->capacity(); +} + +OBJC_EXPORT Class _Nonnull objc_cache_preoptFallbackClass(const cache_t * _Nonnull cache) { + return cache->preoptFallbackClass(); +} + +#endif + +OBJC_EXPORT size_t objc_cache_bytesForCapacity(uint32_t cap) { + return cache_t::bytesForCapacity(cap); +} + +OBJC_EXPORT uint32_t objc_cache_occupied(const cache_t * _Nonnull cache) { + return cache->occupied(); +} + +OBJC_EXPORT unsigned objc_cache_capacity(const struct cache_t * _Nonnull cache) { + return cache->capacity(); +} // __OBJC2__ #endif diff --git a/runtime/objc-class-old.mm b/runtime/objc-class-old.mm index acc269e..c0a79a7 100644 --- a/runtime/objc-class-old.mm +++ b/runtime/objc-class-old.mm @@ -336,7 +336,7 @@ static void _class_resolveClassMethod(id inst, SEL sel, Class cls) ASSERT(cls->isMetaClass()); SEL resolve_sel = @selector(resolveClassMethod:); - if (!lookUpImpOrNil(inst, resolve_sel, cls)) { + if (!lookUpImpOrNilTryCache(inst, resolve_sel, cls)) { // Resolver not implemented. 
return; } @@ -346,7 +346,7 @@ static void _class_resolveClassMethod(id inst, SEL sel, Class cls) // Cache the result (good or bad) so the resolver doesn't fire next time. // +resolveClassMethod adds to self->ISA() a.k.a. cls - IMP imp = lookUpImpOrNil(inst, sel, cls); + IMP imp = lookUpImpOrNilTryCache(inst, sel, cls); if (resolved && PrintResolving) { if (imp) { _objc_inform("RESOLVE: method %c[%s %s] " @@ -376,7 +376,7 @@ static void _class_resolveInstanceMethod(id inst, SEL sel, Class cls) { SEL resolve_sel = @selector(resolveInstanceMethod:); - if (! lookUpImpOrNil(cls, resolve_sel, cls->ISA())) { + if (! lookUpImpOrNilTryCache(cls, resolve_sel, cls->ISA())) { // Resolver not implemented. return; } @@ -386,7 +386,7 @@ static void _class_resolveInstanceMethod(id inst, SEL sel, Class cls) // Cache the result (good or bad) so the resolver doesn't fire next time. // +resolveInstanceMethod adds to self a.k.a. cls - IMP imp = lookUpImpOrNil(inst, sel, cls); + IMP imp = lookUpImpOrNilTryCache(inst, sel, cls); if (resolved && PrintResolving) { if (imp) { @@ -424,7 +424,7 @@ _class_resolveMethod(id inst, SEL sel, Class cls) // try [nonMetaClass resolveClassMethod:sel] // and [cls resolveInstanceMethod:sel] _class_resolveClassMethod(inst, sel, cls); - if (!lookUpImpOrNil(inst, sel, cls)) { + if (!lookUpImpOrNilTryCache(inst, sel, cls)) { _class_resolveInstanceMethod(inst, sel, cls); } } @@ -2593,8 +2593,7 @@ id object_reallocFromZone(id obj, size_t nBytes, void *z) void *object_getIndexedIvars(id obj) { // ivars are tacked onto the end of the object - if (!obj) return nil; - if (obj->isTaggedPointer()) return nil; + if (obj->isTaggedPointerOrNil()) return nil; return ((char *) obj) + obj->ISA()->alignedInstanceSize(); } diff --git a/runtime/objc-class.mm b/runtime/objc-class.mm index 776f3fa..13ea069 100644 --- a/runtime/objc-class.mm +++ b/runtime/objc-class.mm @@ -159,6 +159,9 @@ #include "objc-private.h" #include "objc-abi.h" #include +#if !TARGET_OS_WIN32 +#include 
+#endif /*********************************************************************** * Information about multi-thread support: @@ -195,9 +198,9 @@ Class object_setClass(id obj, Class cls) // weakly-referenced object has an un-+initialized isa. // Unresolved future classes are not so protected. if (!cls->isFuture() && !cls->isInitialized()) { - // use lookUpImpOrNil to indirectly provoke +initialize + // use lookUpImpOrNilTryCache to indirectly provoke +initialize // to avoid duplicating the code to actually send +initialize - lookUpImpOrNil(nil, @selector(initialize), cls, LOOKUP_INITIALIZE); + lookUpImpOrNilTryCache(nil, @selector(initialize), cls, LOOKUP_INITIALIZE); } return obj->changeIsa(cls); @@ -281,7 +284,7 @@ _class_lookUpIvar(Class cls, Ivar ivar, ptrdiff_t& ivarOffset, // Preflight the hasAutomaticIvars check // because _class_getClassForIvar() may need to take locks. bool hasAutomaticIvars = NO; - for (Class c = cls; c; c = c->superclass) { + for (Class c = cls; c; c = c->getSuperclass()) { if (c->hasAutomaticIvars()) { hasAutomaticIvars = YES; break; @@ -337,7 +340,7 @@ _class_getIvarMemoryManagement(Class cls, Ivar ivar) static ALWAYS_INLINE void _object_setIvar(id obj, Ivar ivar, id value, bool assumeStrong) { - if (!obj || !ivar || obj->isTaggedPointer()) return; + if (!ivar || obj->isTaggedPointerOrNil()) return; ptrdiff_t offset; objc_ivar_memory_management_t memoryManagement; @@ -371,7 +374,7 @@ void object_setIvarWithStrongDefault(id obj, Ivar ivar, id value) id object_getIvar(id obj, Ivar ivar) { - if (!obj || !ivar || obj->isTaggedPointer()) return nil; + if (!ivar || obj->isTaggedPointerOrNil()) return nil; ptrdiff_t offset; objc_ivar_memory_management_t memoryManagement; @@ -393,7 +396,7 @@ Ivar _object_setInstanceVariable(id obj, const char *name, void *value, { Ivar ivar = nil; - if (obj && name && !obj->isTaggedPointer()) { + if (name && !obj->isTaggedPointerOrNil()) { if ((ivar = _class_getVariable(obj->ISA(), name))) { _object_setIvar(obj, 
ivar, (id)value, assumeStrong); } @@ -415,7 +418,7 @@ Ivar object_setInstanceVariableWithStrongDefault(id obj, const char *name, Ivar object_getInstanceVariable(id obj, const char *name, void **value) { - if (obj && name && !obj->isTaggedPointer()) { + if (name && !obj->isTaggedPointerOrNil()) { Ivar ivar; if ((ivar = class_getInstanceVariable(obj->ISA(), name))) { if (value) *value = (void *)object_getIvar(obj, ivar); @@ -440,7 +443,7 @@ static void object_cxxDestructFromClass(id obj, Class cls) // Call cls's dtor first, then superclasses's dtors. - for ( ; cls; cls = cls->superclass) { + for ( ; cls; cls = cls->getSuperclass()) { if (!cls->hasCxxDtor()) return; dtor = (void(*)(id)) lookupMethodInClassAndLoadCache(cls, SEL_cxx_destruct); @@ -462,8 +465,7 @@ static void object_cxxDestructFromClass(id obj, Class cls) **********************************************************************/ void object_cxxDestruct(id obj) { - if (!obj) return; - if (obj->isTaggedPointer()) return; + if (obj->isTaggedPointerOrNil()) return; object_cxxDestructFromClass(obj, obj->ISA()); } @@ -491,7 +493,7 @@ object_cxxConstructFromClass(id obj, Class cls, int flags) id (*ctor)(id); Class supercls; - supercls = cls->superclass; + supercls = cls->getSuperclass(); // Call superclasses' ctors first, if any. if (supercls && supercls->hasCxxCtor()) { @@ -510,7 +512,7 @@ object_cxxConstructFromClass(id obj, Class cls, int flags) } if (fastpath((*ctor)(obj))) return obj; // ctor called and succeeded - ok - supercls = cls->superclass; // this reload avoids a spill on the stack + supercls = cls->getSuperclass(); // this reload avoids a spill on the stack // This class's ctor was called and failed. // Call superclasses's dtors to clean up. 
@@ -530,7 +532,7 @@ object_cxxConstructFromClass(id obj, Class cls, int flags) **********************************************************************/ void fixupCopiedIvars(id newObject, id oldObject) { - for (Class cls = oldObject->ISA(); cls; cls = cls->superclass) { + for (Class cls = oldObject->ISA(); cls; cls = cls->getSuperclass()) { if (cls->hasAutomaticIvars()) { // Use alignedInstanceStart() because unaligned bytes at the start // of this class's ivars are not represented in the layout bitmap. @@ -636,12 +638,12 @@ BOOL class_respondsToSelector(Class cls, SEL sel) // inst is an instance of cls or a subclass thereof, or nil if none is known. // Non-nil inst is faster in some cases. See lookUpImpOrForward() for details. -NEVER_INLINE BOOL +NEVER_INLINE __attribute__((flatten)) BOOL class_respondsToSelector_inst(id inst, SEL sel, Class cls) { // Avoids +initialize because it historically did so. // We're not returning a callable IMP anyway. - return sel && cls && lookUpImpOrNil(inst, sel, cls, LOOKUP_RESOLVER); + return sel && cls && lookUpImpOrNilTryCache(inst, sel, cls, LOOKUP_RESOLVER); } @@ -662,13 +664,16 @@ IMP class_lookupMethod(Class cls, SEL sel) return class_getMethodImplementation(cls, sel); } +__attribute__((flatten)) IMP class_getMethodImplementation(Class cls, SEL sel) { IMP imp; if (!cls || !sel) return nil; - imp = lookUpImpOrNil(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER); + lockdebug_assert_no_locks_locked_except({ &loadMethodLock }); + + imp = lookUpImpOrNilTryCache(nil, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER); // Translate forwarding function to C-callable external version if (!imp) { @@ -775,7 +780,7 @@ Class _calloc_class(size_t size) Class class_getSuperclass(Class cls) { if (!cls) return nil; - return cls->superclass; + return cls->getSuperclass(); } BOOL class_isMetaClass(Class cls) @@ -886,6 +891,15 @@ inform_duplicate(const char *name, Class oldCls, Class newCls) const header_info *newHeader = _headerForClass(newCls); 
const char *oldName = oldHeader ? oldHeader->fname() : "??"; const char *newName = newHeader ? newHeader->fname() : "??"; + const objc_duplicate_class **_dupi = NULL; + + LINKER_SET_FOREACH(_dupi, const objc_duplicate_class **, "__objc_dupclass") { + const objc_duplicate_class *dupi = *_dupi; + + if (strcmp(dupi->name, name) == 0) { + return; + } + } (DebugDuplicateClasses ? _objc_fatal : _objc_inform) ("Class %s is implemented in both %s (%p) and %s (%p). " diff --git a/runtime/objc-config.h b/runtime/objc-config.h index ee659db..cac827e 100644 --- a/runtime/objc-config.h +++ b/runtime/objc-config.h @@ -26,15 +26,6 @@ #include -// Define __OBJC2__ for the benefit of our asm files. -#ifndef __OBJC2__ -# if TARGET_OS_OSX && !TARGET_OS_IOSMAC && __i386__ - // old ABI -# else -# define __OBJC2__ 1 -# endif -#endif - // Avoid the !NDEBUG double negative. #if !NDEBUG # define DEBUG 1 @@ -51,7 +42,7 @@ #endif // Define SUPPORT_ZONES=1 to enable malloc zone support in NXHashTable. -#if !(TARGET_OS_OSX || TARGET_OS_IOSMAC) +#if !(TARGET_OS_OSX || TARGET_OS_MACCATALYST) # define SUPPORT_ZONES 0 #else # define SUPPORT_ZONES 1 @@ -73,7 +64,7 @@ // Define SUPPORT_TAGGED_POINTERS=1 to enable tagged pointer objects // Be sure to edit tagged pointer SPI in objc-internal.h as well. -#if !(__OBJC2__ && __LP64__) +#if !__LP64__ # define SUPPORT_TAGGED_POINTERS 0 #else # define SUPPORT_TAGGED_POINTERS 1 @@ -82,7 +73,7 @@ // Define SUPPORT_MSB_TAGGED_POINTERS to use the MSB // as the tagged pointer marker instead of the LSB. // Be sure to edit tagged pointer SPI in objc-internal.h as well. -#if !SUPPORT_TAGGED_POINTERS || (TARGET_OS_OSX || TARGET_OS_IOSMAC) +#if !SUPPORT_TAGGED_POINTERS || ((TARGET_OS_OSX || TARGET_OS_MACCATALYST) && __x86_64__) # define SUPPORT_MSB_TAGGED_POINTERS 0 #else # define SUPPORT_MSB_TAGGED_POINTERS 1 @@ -101,7 +92,7 @@ // Define SUPPORT_PACKED_ISA=1 on platforms that store the class in the isa // field as a maskable pointer with other data around it. 
#if (!__LP64__ || TARGET_OS_WIN32 || \ - (TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC)) + (TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__)) # define SUPPORT_PACKED_ISA 0 #else # define SUPPORT_PACKED_ISA 1 @@ -126,7 +117,7 @@ // Define SUPPORT_ZEROCOST_EXCEPTIONS to use "zero-cost" exceptions for OBJC2. // Be sure to edit objc-exception.h as well (objc_add/removeExceptionHandler) -#if !__OBJC2__ || (defined(__arm__) && __USING_SJLJ_EXCEPTIONS__) +#if defined(__arm__) && __USING_SJLJ_EXCEPTIONS__ # define SUPPORT_ZEROCOST_EXCEPTIONS 0 #else # define SUPPORT_ZEROCOST_EXCEPTIONS 1 @@ -162,9 +153,16 @@ # define SUPPORT_MESSAGE_LOGGING 1 #endif +// Define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS to combine consecutive pointers to the same object in autorelease pools +#if !__LP64__ +# define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS 0 +#else +# define SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS 1 +#endif + // Define HAVE_TASK_RESTARTABLE_RANGES to enable usage of // task_restartable_ranges_synchronize() -#if TARGET_OS_SIMULATOR || defined(__i386__) || defined(__arm__) || !TARGET_OS_MAC || defined(DARLING) +#if TARGET_OS_SIMULATOR || defined(__i386__) || defined(__arm__) || !TARGET_OS_MAC # define HAVE_TASK_RESTARTABLE_RANGES 0 #else # define HAVE_TASK_RESTARTABLE_RANGES 1 @@ -178,16 +176,12 @@ // because objc-class.h is public and objc-config.h is not. //#define OBJC_INSTRUMENTED -// In __OBJC2__, the runtimeLock is a mutex always held -// hence the cache lock is redundant and can be elided. +// The runtimeLock is a mutex always held hence the cache lock is +// redundant and can be elided. // // If the runtime lock ever becomes a rwlock again, // the cache lock would need to be used again -#if __OBJC2__ #define CONFIG_USE_CACHE_LOCK 0 -#else -#define CONFIG_USE_CACHE_LOCK 1 -#endif // Determine how the method cache stores IMPs. #define CACHE_IMP_ENCODING_NONE 1 // Method cache contains raw IMP. 
@@ -208,13 +202,75 @@ #define CACHE_MASK_STORAGE_OUTLINED 1 #define CACHE_MASK_STORAGE_HIGH_16 2 #define CACHE_MASK_STORAGE_LOW_4 3 +#define CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS 4 #if defined(__arm64__) && __LP64__ +#if TARGET_OS_OSX || TARGET_OS_SIMULATOR +#define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS +#else #define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_HIGH_16 +#endif #elif defined(__arm64__) && !__LP64__ #define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_LOW_4 #else #define CACHE_MASK_STORAGE CACHE_MASK_STORAGE_OUTLINED #endif +// Constants used for signing/authing isas. This doesn't quite belong +// here, but the asm files can't import other headers. +#define ISA_SIGNING_DISCRIMINATOR 0x6AE1 +#define ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS 0xB5AB + +#define ISA_SIGNING_KEY ptrauth_key_process_independent_data + +// ISA signing authentication modes. Set ISA_SIGNING_AUTH_MODE to one +// of these to choose how ISAs are authenticated. +#define ISA_SIGNING_STRIP 1 // Strip the signature whenever reading an ISA. +#define ISA_SIGNING_AUTH 2 // Authenticate the signature on all ISAs. + + +// ISA signing modes. Set ISA_SIGNING_SIGN_MODE to one of these to +// choose how ISAs are signed. +#define ISA_SIGNING_SIGN_NONE 1 // Sign no ISAs. +#define ISA_SIGNING_SIGN_ONLY_SWIFT 2 // Only sign ISAs of Swift objects. +#define ISA_SIGNING_SIGN_ALL 3 // Sign all ISAs. 
+ +#if __has_feature(ptrauth_objc_isa_strips) || __has_feature(ptrauth_objc_isa_signs) || __has_feature(ptrauth_objc_isa_authenticates) +# if __has_feature(ptrauth_objc_isa_authenticates) +# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_AUTH +# else +# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_STRIP +# endif +# if __has_feature(ptrauth_objc_isa_signs) +# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_ALL +# else +# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_NONE +# endif +#else +# if __has_feature(ptrauth_objc_isa) +# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_AUTH +# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_ALL +# else +# define ISA_SIGNING_AUTH_MODE ISA_SIGNING_STRIP +# define ISA_SIGNING_SIGN_MODE ISA_SIGNING_SIGN_NONE +# endif +#endif + +// When set, an unsigned superclass pointer is treated as Nil, which +// will treat the class as if its superclass was weakly linked and +// not loaded, and cause uses of the class to resolve to Nil. +#define SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL 0 + +#if defined(__arm64__) && TARGET_OS_IOS && !TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST +#define CONFIG_USE_PREOPT_CACHES 1 +#else +#define CONFIG_USE_PREOPT_CACHES 0 +#endif + +// When set to 1, small methods in the shared cache have a direct +// offset to a selector. When set to 0, small methods in the shared +// cache have the same format as other small methods, with an offset +// to a selref. 
+#define CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS 1 + #endif diff --git a/runtime/objc-env.h b/runtime/objc-env.h index ccdceb6..7083b3e 100644 --- a/runtime/objc-env.h +++ b/runtime/objc-env.h @@ -36,6 +36,7 @@ OPTION( DebugMissingPools, OBJC_DEBUG_MISSING_POOLS, "warn about a OPTION( DebugPoolAllocation, OBJC_DEBUG_POOL_ALLOCATION, "halt when autorelease pools are popped out of order, and allow heap debuggers to track autorelease pools") OPTION( DebugDuplicateClasses, OBJC_DEBUG_DUPLICATE_CLASSES, "halt when multiple classes with the same name are present") OPTION( DebugDontCrash, OBJC_DEBUG_DONT_CRASH, "halt the process by exiting instead of crashing") +OPTION( DebugPoolDepth, OBJC_DEBUG_POOL_DEPTH, "log fault when at least a set number of autorelease pages has been allocated") OPTION( DisableVtables, OBJC_DISABLE_VTABLES, "disable vtable dispatch") OPTION( DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION, "disable preoptimization courtesy of dyld shared cache") @@ -43,3 +44,7 @@ OPTION( DisableTaggedPointers, OBJC_DISABLE_TAGGED_POINTERS, "disable tagg OPTION( DisableTaggedPointerObfuscation, OBJC_DISABLE_TAG_OBFUSCATION, "disable obfuscation of tagged pointers") OPTION( DisableNonpointerIsa, OBJC_DISABLE_NONPOINTER_ISA, "disable non-pointer isa fields") OPTION( DisableInitializeForkSafety, OBJC_DISABLE_INITIALIZE_FORK_SAFETY, "disable safety checks for +initialize after fork") +OPTION( DisableFaults, OBJC_DISABLE_FAULTS, "disable os faults") +OPTION( DisablePreoptCaches, OBJC_DISABLE_PREOPTIMIZED_CACHES, "disable preoptimized caches") +OPTION( DisableAutoreleaseCoalescing, OBJC_DISABLE_AUTORELEASE_COALESCING, "disable coalescing of autorelease pool pointers") +OPTION( DisableAutoreleaseCoalescingLRU, OBJC_DISABLE_AUTORELEASE_COALESCING_LRU, "disable coalescing of autorelease pool pointers using look back N strategy") diff --git a/runtime/objc-exception.mm b/runtime/objc-exception.mm index 6c318c6..2b794e6 100644 --- a/runtime/objc-exception.mm +++ 
b/runtime/objc-exception.mm @@ -440,7 +440,7 @@ static int _objc_default_exception_matcher(Class catch_cls, id exception) Class cls; for (cls = exception->getIsa(); cls != nil; - cls = cls->superclass) + cls = cls->getSuperclass()) { if (cls == catch_cls) return 1; } diff --git a/runtime/objc-file.h b/runtime/objc-file.h index 23c0da1..597fd3b 100644 --- a/runtime/objc-file.h +++ b/runtime/objc-file.h @@ -38,10 +38,14 @@ extern message_ref_t *_getObjc2MessageRefs(const header_info *hi, size_t *count) extern Class*_getObjc2ClassRefs(const header_info *hi, size_t *count); extern Class*_getObjc2SuperRefs(const header_info *hi, size_t *count); extern classref_t const *_getObjc2ClassList(const header_info *hi, size_t *count); -extern classref_t const *_getObjc2NonlazyClassList(const header_info *hi, size_t *count); -extern category_t * const *_getObjc2CategoryList(const header_info *hi, size_t *count); -extern category_t * const *_getObjc2CategoryList2(const header_info *hi, size_t *count); -extern category_t * const *_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count); +// Use hi->nlclslist() instead +// extern classref_t const *_getObjc2NonlazyClassList(const header_info *hi, size_t *count); +// Use hi->catlist() instead +// extern category_t * const *_getObjc2CategoryList(const header_info *hi, size_t *count); +// Use hi->catlist2() instead +// extern category_t * const *_getObjc2CategoryList2(const header_info *hi, size_t *count); +// Use hi->nlcatlist() instead +// extern category_t * const *_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count); extern protocol_t * const *_getObjc2ProtocolList(const header_info *hi, size_t *count); extern protocol_t **_getObjc2ProtocolRefs(const header_info *hi, size_t *count); @@ -50,6 +54,10 @@ struct UnsignedInitializer { private: uintptr_t storage; public: + UnsignedInitializer(uint32_t offset) { + storage = (uintptr_t)&_mh_dylib_header + offset; + } + void operator () () const { using Initializer = 
void(*)(); Initializer init = @@ -62,8 +70,11 @@ public: extern UnsignedInitializer *getLibobjcInitializers(const header_info *hi, size_t *count); extern classref_t const *_getObjc2NonlazyClassList(const headerType *mhdr, size_t *count); +extern category_t * const *_getObjc2CategoryList(const headerType *mhdr, size_t *count); +extern category_t * const *_getObjc2CategoryList2(const headerType *mhdr, size_t *count); extern category_t * const *_getObjc2NonlazyCategoryList(const headerType *mhdr, size_t *count); extern UnsignedInitializer *getLibobjcInitializers(const headerType *mhdr, size_t *count); +extern uint32_t *getLibobjcInitializerOffsets(const headerType *hi, size_t *count); static inline void foreach_data_segment(const headerType *mhdr, @@ -83,11 +94,12 @@ foreach_data_segment(const headerType *mhdr, seg = (const segmentType *)((char *)seg + seg->cmdsize); } - // enumerate __DATA* segments + // enumerate __DATA* and __AUTH* segments seg = (const segmentType *) (mhdr + 1); for (unsigned long i = 0; i < mhdr->ncmds; i++) { if (seg->cmd == SEGMENT_CMD && - segnameStartsWith(seg->segname, "__DATA")) + (segnameStartsWith(seg->segname, "__DATA") || + segnameStartsWith(seg->segname, "__AUTH"))) { code(seg, slide); } diff --git a/runtime/objc-file.mm b/runtime/objc-file.mm index ffde2fd..c7ff5ca 100644 --- a/runtime/objc-file.mm +++ b/runtime/objc-file.mm @@ -68,6 +68,12 @@ GETSECT(_getObjc2ProtocolList, protocol_t * const, "__objc_protolist") GETSECT(_getObjc2ProtocolRefs, protocol_t *, "__objc_protorefs"); GETSECT(getLibobjcInitializers, UnsignedInitializer, "__objc_init_func"); +uint32_t *getLibobjcInitializerOffsets(const headerType *mhdr, size_t *outCount) { + unsigned long byteCount = 0; + uint32_t *offsets = (uint32_t *)getsectiondata(mhdr, "__TEXT", "__objc_init_offs", &byteCount); + if (outCount) *outCount = byteCount / sizeof(uint32_t); + return offsets; +} objc_image_info * _getObjcImageInfo(const headerType *mhdr, size_t *outBytes) diff --git 
a/runtime/objc-gdb.h b/runtime/objc-gdb.h index 9cab4a3..99cff42 100644 --- a/runtime/objc-gdb.h +++ b/runtime/objc-gdb.h @@ -219,6 +219,10 @@ OBJC_EXPORT uintptr_t objc_debug_taggedpointer_mask OBJC_EXPORT uintptr_t objc_debug_taggedpointer_obfuscator OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0); +#if OBJC_SPLIT_TAGGED_POINTERS +OBJC_EXPORT uint8_t objc_debug_tag60_permutations[8]; +#endif + // tag_slot = (obj >> slot_shift) & slot_mask OBJC_EXPORT unsigned int objc_debug_taggedpointer_slot_shift @@ -266,6 +270,9 @@ OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_payload_lshift OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_payload_rshift OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0, 2.0); +OBJC_EXPORT uintptr_t objc_debug_constant_cfstring_tag_bits + OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0); + #endif @@ -289,6 +296,9 @@ OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset OBJC_AVA OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0); OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0); OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0); +#if __OBJC2__ +OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0); +#endif __END_DECLS diff --git a/runtime/objc-initialize.mm b/runtime/objc-initialize.mm index 4713325..8f98cbd 100644 --- a/runtime/objc-initialize.mm +++ b/runtime/objc-initialize.mm @@ -396,10 +396,10 @@ static bool classHasTrivialInitialize(Class cls) { if (cls->isRootClass() || cls->isRootMetaclass()) return true; - Class rootCls = cls->ISA()->ISA()->superclass; + Class rootCls = cls->ISA()->ISA()->getSuperclass(); - IMP rootImp = lookUpImpOrNil(rootCls, @selector(initialize), rootCls->ISA()); - IMP imp = lookUpImpOrNil(cls, @selector(initialize), cls->ISA()); + IMP rootImp = 
lookUpImpOrNilTryCache(rootCls, @selector(initialize), rootCls->ISA()); + IMP imp = lookUpImpOrNilTryCache(cls, @selector(initialize), cls->ISA()); return (imp == nil || imp == (IMP)&objc_noop_imp || imp == rootImp); } @@ -500,7 +500,7 @@ void initializeNonMetaClass(Class cls) // Make sure super is done initializing BEFORE beginning to initialize cls. // See note about deadlock above. - supercls = cls->superclass; + supercls = cls->getSuperclass(); if (supercls && !supercls->isInitialized()) { initializeNonMetaClass(supercls); } diff --git a/runtime/objc-internal.h b/runtime/objc-internal.h index afc82f5..ad40a1c 100644 --- a/runtime/objc-internal.h +++ b/runtime/objc-internal.h @@ -44,6 +44,11 @@ #include #include +// Include NSObject.h only if we're ObjC. Module imports get unhappy +// otherwise. +#if __OBJC__ +#include +#endif // Termination reasons in the OS_REASON_OBJC namespace. #define OBJC_EXIT_REASON_UNSPECIFIED 1 @@ -54,6 +59,18 @@ // The runtime's class structure will never grow beyond this. #define OBJC_MAX_CLASS_SIZE (32*sizeof(void*)) +// Private objc_setAssociatedObject policy modifier. When an object is +// destroyed, associated objects attached to that object that are marked with +// this will be released after all associated objects not so marked. +// +// In addition, such associations are not removed when calling +// objc_removeAssociatedObjects. +// +// NOTE: This should be used sparingly. Performance will be poor when a single +// object has more than a few (deliberately vague) associated objects marked +// with this flag. If you're not sure if you should use this, you should not use +// this! 
+#define _OBJC_ASSOCIATION_SYSTEM_OBJECT (1 << 16) __BEGIN_DECLS @@ -160,8 +177,14 @@ OBJC_EXPORT objc_imp_cache_entry *_Nullable class_copyImpCache(Class _Nonnull cls, int * _Nullable outCount) OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 5.0); + +OBJC_EXPORT +unsigned long +sel_hash(SEL _Nullable sel) + OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 6.0); #endif + // Plainly-implemented GC barriers. Rosetta used to use these. OBJC_EXPORT id _Nullable objc_assign_strongCast_generic(id _Nullable value, id _Nullable * _Nonnull dest) @@ -199,7 +222,7 @@ OBJC_EXPORT void _objc_setClassLoader(BOOL (* _Nonnull newClassLoader)(const char * _Nonnull)) OBJC2_UNAVAILABLE; -#if !(TARGET_OS_OSX && !TARGET_OS_IOSMAC && __i386__) +#if !(TARGET_OS_OSX && !TARGET_OS_MACCATALYST && __i386__) // Add a class copy fixup handler. The name is a misnomer, as // multiple calls will install multiple handlers. Older versions // of the Swift runtime call it by name, and it's only used by Swift @@ -240,6 +263,21 @@ objc_copyClassNamesForImageHeader(const struct mach_header * _Nonnull mh, unsigned int * _Nullable outCount) OBJC_AVAILABLE(10.14, 12.0, 12.0, 5.0, 3.0); +/** + * Returns all the classes within a library. + * + * @param image The mach header for library or framework you are inquiring about. + * @param outCount The number of class names returned. + * + * @return An array of Class objects + */ + +OBJC_EXPORT Class _Nonnull * _Nullable +objc_copyClassesForImage(const char * _Nonnull image, + unsigned int * _Nullable outCount) + OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 4.0); + + // Tagged pointer objects. #if __LP64__ @@ -287,11 +325,23 @@ enum OBJC_TAG_UIColor = 17, OBJC_TAG_CGColor = 18, OBJC_TAG_NSIndexSet = 19, + OBJC_TAG_NSMethodSignature = 20, + OBJC_TAG_UTTypeRecord = 21, + + // When using the split tagged pointer representation + // (OBJC_SPLIT_TAGGED_POINTERS), this is the first tag where + // the tag and payload are unobfuscated. 
All tags from here to + // OBJC_TAG_Last52BitPayload are unobfuscated. The shared cache + // builder is able to construct these as long as the low bit is + // not set (i.e. even-numbered tags). + OBJC_TAG_FirstUnobfuscatedSplitTag = 136, // 128 + 8, first ext tag with high bit set + + OBJC_TAG_Constant_CFString = 136, OBJC_TAG_First60BitPayload = 0, OBJC_TAG_Last60BitPayload = 6, OBJC_TAG_First52BitPayload = 8, - OBJC_TAG_Last52BitPayload = 263, + OBJC_TAG_Last52BitPayload = 263, OBJC_TAG_RESERVED_264 = 264 }; @@ -350,7 +400,16 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr); // Don't use the values below. Use the declarations above. -#if (TARGET_OS_OSX || TARGET_OS_IOSMAC) && __x86_64__ +#if __arm64__ +// ARM64 uses a new tagged pointer scheme where normal tags are in +// the low bits, extended tags are in the high bits, and half of the +// extended tag space is reserved for unobfuscated payloads. +# define OBJC_SPLIT_TAGGED_POINTERS 1 +#else +# define OBJC_SPLIT_TAGGED_POINTERS 0 +#endif + +#if (TARGET_OS_OSX || TARGET_OS_MACCATALYST) && __x86_64__ // 64-bit Mac - tag bit is LSB # define OBJC_MSB_TAGGED_POINTERS 0 #else @@ -358,17 +417,37 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr); # define OBJC_MSB_TAGGED_POINTERS 1 #endif -#define _OBJC_TAG_INDEX_MASK 0x7 +#define _OBJC_TAG_INDEX_MASK 0x7UL + +#if OBJC_SPLIT_TAGGED_POINTERS +#define _OBJC_TAG_SLOT_COUNT 8 +#define _OBJC_TAG_SLOT_MASK 0x7UL +#else // array slot includes the tag bit itself #define _OBJC_TAG_SLOT_COUNT 16 -#define _OBJC_TAG_SLOT_MASK 0xf +#define _OBJC_TAG_SLOT_MASK 0xfUL +#endif #define _OBJC_TAG_EXT_INDEX_MASK 0xff // array slot has no extra bits #define _OBJC_TAG_EXT_SLOT_COUNT 256 #define _OBJC_TAG_EXT_SLOT_MASK 0xff -#if OBJC_MSB_TAGGED_POINTERS +#if OBJC_SPLIT_TAGGED_POINTERS +# define _OBJC_TAG_MASK (1UL<<63) +# define _OBJC_TAG_INDEX_SHIFT 0 +# define _OBJC_TAG_SLOT_SHIFT 0 +# define _OBJC_TAG_PAYLOAD_LSHIFT 1 +# define _OBJC_TAG_PAYLOAD_RSHIFT 4 +# 
define _OBJC_TAG_EXT_MASK (_OBJC_TAG_MASK | 0x7UL) +# define _OBJC_TAG_NO_OBFUSCATION_MASK ((1UL<<62) | _OBJC_TAG_EXT_MASK) +# define _OBJC_TAG_CONSTANT_POINTER_MASK \ + ~(_OBJC_TAG_EXT_MASK | ((uintptr_t)_OBJC_TAG_EXT_SLOT_MASK << _OBJC_TAG_EXT_SLOT_SHIFT)) +# define _OBJC_TAG_EXT_INDEX_SHIFT 55 +# define _OBJC_TAG_EXT_SLOT_SHIFT 55 +# define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 9 +# define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12 +#elif OBJC_MSB_TAGGED_POINTERS # define _OBJC_TAG_MASK (1UL<<63) # define _OBJC_TAG_INDEX_SHIFT 60 # define _OBJC_TAG_SLOT_SHIFT 60 @@ -392,21 +471,64 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr); # define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12 #endif +// Map of tags to obfuscated tags. extern uintptr_t objc_debug_taggedpointer_obfuscator; +#if OBJC_SPLIT_TAGGED_POINTERS +extern uint8_t objc_debug_tag60_permutations[8]; + +static inline uintptr_t _objc_basicTagToObfuscatedTag(uintptr_t tag) { + return objc_debug_tag60_permutations[tag]; +} + +static inline uintptr_t _objc_obfuscatedTagToBasicTag(uintptr_t tag) { + for (unsigned i = 0; i < 7; i++) + if (objc_debug_tag60_permutations[i] == tag) + return i; + return 7; +} +#endif + static inline void * _Nonnull _objc_encodeTaggedPointer(uintptr_t ptr) { - return (void *)(objc_debug_taggedpointer_obfuscator ^ ptr); + uintptr_t value = (objc_debug_taggedpointer_obfuscator ^ ptr); +#if OBJC_SPLIT_TAGGED_POINTERS + if ((value & _OBJC_TAG_NO_OBFUSCATION_MASK) == _OBJC_TAG_NO_OBFUSCATION_MASK) + return (void *)ptr; + uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK; + uintptr_t permutedTag = _objc_basicTagToObfuscatedTag(basicTag); + value &= ~(_OBJC_TAG_INDEX_MASK << _OBJC_TAG_INDEX_SHIFT); + value |= permutedTag << _OBJC_TAG_INDEX_SHIFT; +#endif + return (void *)value; +} + +static inline uintptr_t +_objc_decodeTaggedPointer_noPermute(const void * _Nullable ptr) +{ + uintptr_t value = (uintptr_t)ptr; +#if OBJC_SPLIT_TAGGED_POINTERS + if ((value & 
_OBJC_TAG_NO_OBFUSCATION_MASK) == _OBJC_TAG_NO_OBFUSCATION_MASK) + return value; +#endif + return value ^ objc_debug_taggedpointer_obfuscator; } static inline uintptr_t _objc_decodeTaggedPointer(const void * _Nullable ptr) { - return (uintptr_t)ptr ^ objc_debug_taggedpointer_obfuscator; + uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr); +#if OBJC_SPLIT_TAGGED_POINTERS + uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK; + + value &= ~(_OBJC_TAG_INDEX_MASK << _OBJC_TAG_INDEX_SHIFT); + value |= _objc_obfuscatedTagToBasicTag(basicTag) << _OBJC_TAG_INDEX_SHIFT; +#endif + return value; } -static inline bool +static inline bool _objc_taggedPointersEnabled(void) { extern uintptr_t objc_debug_taggedpointer_mask; @@ -445,6 +567,15 @@ _objc_isTaggedPointer(const void * _Nullable ptr) return ((uintptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK; } +static inline bool +_objc_isTaggedPointerOrNil(const void * _Nullable ptr) +{ + // this function is here so that clang can turn this into + // a comparison with NULL when this is appropriate + // it turns out it's not able to in many cases without this + return !ptr || ((uintptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK; +} + static inline objc_tag_index_t _objc_getTaggedPointerTag(const void * _Nullable ptr) { @@ -463,7 +594,7 @@ static inline uintptr_t _objc_getTaggedPointerValue(const void * _Nullable ptr) { // ASSERT(_objc_isTaggedPointer(ptr)); - uintptr_t value = _objc_decodeTaggedPointer(ptr); + uintptr_t value = _objc_decodeTaggedPointer_noPermute(ptr); uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK; if (basicTag == _OBJC_TAG_INDEX_MASK) { return (value << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT; @@ -476,7 +607,7 @@ static inline intptr_t _objc_getTaggedPointerSignedValue(const void * _Nullable ptr) { // ASSERT(_objc_isTaggedPointer(ptr)); - uintptr_t value = _objc_decodeTaggedPointer(ptr); + uintptr_t value = 
_objc_decodeTaggedPointer_noPermute(ptr); uintptr_t basicTag = (value >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK; if (basicTag == _OBJC_TAG_INDEX_MASK) { return ((intptr_t)value << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT; @@ -485,6 +616,13 @@ _objc_getTaggedPointerSignedValue(const void * _Nullable ptr) } } +# if OBJC_SPLIT_TAGGED_POINTERS +static inline void * _Nullable +_objc_getTaggedPointerRawPointerValue(const void * _Nullable ptr) { + return (void *)((uintptr_t)ptr & _OBJC_TAG_CONSTANT_POINTER_MASK); +} +# endif + // OBJC_HAVE_TAGGED_POINTERS #endif @@ -595,6 +733,11 @@ _class_getIvarMemoryManagement(Class _Nullable cls, Ivar _Nonnull ivar) OBJC_EXPORT BOOL _class_isFutureClass(Class _Nullable cls) OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0, 2.0); +/// Returns true if the class is an ABI stable Swift class. (Despite +/// the name, this does NOT return true for Swift classes built with +/// Swift versions prior to 5.0.) +OBJC_EXPORT BOOL _class_isSwift(Class _Nullable cls) + OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0); // API to only be called by root classes like NSObject or NSProxy @@ -876,12 +1019,47 @@ typedef void (*_objc_func_willInitializeClass)(void * _Nullable context, Class _ OBJC_EXPORT void _objc_addWillInitializeClassFunc(_objc_func_willInitializeClass _Nonnull func, void * _Nullable context) OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0); +// Replicate the conditionals in objc-config.h for packed isa, indexed isa, and preopt caches +#if __ARM_ARCH_7K__ >= 2 || (__arm64__ && !__LP64__) || \ + !(!__LP64__ || TARGET_OS_WIN32 || \ + (TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST && !__arm64__)) +OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout; +#define OBJC_WEAK_FORMATION_CALLOUT_DEFINED 1 +#else +#define OBJC_WEAK_FORMATION_CALLOUT_DEFINED 0 +#endif + +#if defined(__arm64__) && TARGET_OS_IOS && !TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST +#define CONFIG_USE_PREOPT_CACHES 1 +#else +#define CONFIG_USE_PREOPT_CACHES 0 
+#endif + + +#if __OBJC2__ +// Helper function for objc4 tests only! Do not call this yourself +// for any reason ever. +OBJC_EXPORT void _method_setImplementationRawUnsafe(Method _Nonnull m, IMP _Nonnull imp) + OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0); +#endif + // API to only be called by classes that provide their own reference count storage OBJC_EXPORT void _objc_deallocOnMainThreadHelper(void * _Nullable context) OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0, 2.0); +#if __OBJC__ +// Declarations for internal methods used for custom weak reference +// implementations. These declarations ensure that the compiler knows +// to exclude these methods from NS_DIRECT_MEMBERS. Do NOT implement +// these methods unless you really know what you're doing. +@interface NSObject () +- (BOOL)_tryRetain; +- (BOOL)_isDeallocating; +@end +#endif + // On async versus sync deallocation and the _dealloc2main flag // // Theory: @@ -944,7 +1122,7 @@ typedef enum { } \ } \ -(NSUInteger)retainCount { \ - return (_rc_ivar + 2) >> 1; \ + return (NSUInteger)(_rc_ivar + 2) >> 1; \ } \ -(BOOL)_tryRetain { \ __typeof__(_rc_ivar) _prev; \ @@ -966,12 +1144,12 @@ typedef enum { } else if (_rc_ivar < -2) { \ __builtin_trap(); /* BUG: over-release elsewhere */ \ } \ - return _rc_ivar & 1; \ + return (_rc_ivar & 1) != 0; \ } #define _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, _dealloc2main) \ _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC_BLOCK(_rc_ivar, (^(id _self_ __attribute__((unused))) { \ - if (_dealloc2main && !pthread_main_np()) { \ + if ((_dealloc2main) && !pthread_main_np()) { \ return _OBJC_DEALLOC_OBJECT_LATER; \ } else { \ return _OBJC_DEALLOC_OBJECT_NOW; \ @@ -981,6 +1159,25 @@ typedef enum { #define _OBJC_SUPPORTED_INLINE_REFCNT(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 0) #define _OBJC_SUPPORTED_INLINE_REFCNT_WITH_DEALLOC2MAIN(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 1) + +// C cache_t wrappers for objcdt and the IMP caches test tool +struct cache_t; +struct bucket_t; 
+struct preopt_cache_t; +OBJC_EXPORT struct bucket_t * _Nonnull objc_cache_buckets(const struct cache_t * _Nonnull cache); +OBJC_EXPORT size_t objc_cache_bytesForCapacity(uint32_t cap); +OBJC_EXPORT uint32_t objc_cache_occupied(const struct cache_t * _Nonnull cache); +OBJC_EXPORT unsigned objc_cache_capacity(const struct cache_t * _Nonnull cache); + +#if CONFIG_USE_PREOPT_CACHES + +OBJC_EXPORT bool objc_cache_isConstantOptimizedCache(const struct cache_t * _Nonnull cache, bool strict, uintptr_t empty_addr); +OBJC_EXPORT unsigned objc_cache_preoptCapacity(const struct cache_t * _Nonnull cache); +OBJC_EXPORT Class _Nonnull objc_cache_preoptFallbackClass(const struct cache_t * _Nonnull cache); +OBJC_EXPORT const struct preopt_cache_t * _Nonnull objc_cache_preoptCache(const struct cache_t * _Nonnull cache); + +#endif + __END_DECLS #endif diff --git a/runtime/objc-lockdebug.h b/runtime/objc-lockdebug.h index a3048b1..a69ee06 100644 --- a/runtime/objc-lockdebug.h +++ b/runtime/objc-lockdebug.h @@ -24,11 +24,13 @@ #if LOCKDEBUG extern void lockdebug_assert_all_locks_locked(); extern void lockdebug_assert_no_locks_locked(); +extern void lockdebug_assert_no_locks_locked_except(std::initializer_list canBeLocked); extern void lockdebug_setInForkPrepare(bool); extern void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock); #else static constexpr inline void lockdebug_assert_all_locks_locked() { } static constexpr inline void lockdebug_assert_no_locks_locked() { } +static constexpr inline void lockdebug_assert_no_locks_locked_except(std::initializer_list canBeLocked) { }; static constexpr inline void lockdebug_setInForkPrepare(bool) { } static constexpr inline void lockdebug_lock_precedes_lock(const void *, const void *) { } #endif @@ -40,12 +42,12 @@ extern void lockdebug_mutex_unlock(mutex_tt *lock); extern void lockdebug_mutex_assert_locked(mutex_tt *lock); extern void lockdebug_mutex_assert_unlocked(mutex_tt *lock); -static constexpr inline void 
lockdebug_remember_mutex(mutex_tt *lock) { } -static constexpr inline void lockdebug_mutex_lock(mutex_tt *lock) { } -static constexpr inline void lockdebug_mutex_try_lock(mutex_tt *lock) { } -static constexpr inline void lockdebug_mutex_unlock(mutex_tt *lock) { } -static constexpr inline void lockdebug_mutex_assert_locked(mutex_tt *lock) { } -static constexpr inline void lockdebug_mutex_assert_unlocked(mutex_tt *lock) { } +static constexpr inline void lockdebug_remember_mutex(__unused mutex_tt *lock) { } +static constexpr inline void lockdebug_mutex_lock(__unused mutex_tt *lock) { } +static constexpr inline void lockdebug_mutex_try_lock(__unused mutex_tt *lock) { } +static constexpr inline void lockdebug_mutex_unlock(__unused mutex_tt *lock) { } +static constexpr inline void lockdebug_mutex_assert_locked(__unused mutex_tt *lock) { } +static constexpr inline void lockdebug_mutex_assert_unlocked(__unused mutex_tt *lock) { } extern void lockdebug_remember_monitor(monitor_tt *lock); @@ -55,12 +57,12 @@ extern void lockdebug_monitor_wait(monitor_tt *lock); extern void lockdebug_monitor_assert_locked(monitor_tt *lock); extern void lockdebug_monitor_assert_unlocked(monitor_tt *lock); -static constexpr inline void lockdebug_remember_monitor(monitor_tt *lock) { } -static constexpr inline void lockdebug_monitor_enter(monitor_tt *lock) { } -static constexpr inline void lockdebug_monitor_leave(monitor_tt *lock) { } -static constexpr inline void lockdebug_monitor_wait(monitor_tt *lock) { } -static constexpr inline void lockdebug_monitor_assert_locked(monitor_tt *lock) { } -static constexpr inline void lockdebug_monitor_assert_unlocked(monitor_tt *lock) {} +static constexpr inline void lockdebug_remember_monitor(__unused monitor_tt *lock) { } +static constexpr inline void lockdebug_monitor_enter(__unused monitor_tt *lock) { } +static constexpr inline void lockdebug_monitor_leave(__unused monitor_tt *lock) { } +static constexpr inline void lockdebug_monitor_wait(__unused 
monitor_tt *lock) { } +static constexpr inline void lockdebug_monitor_assert_locked(__unused monitor_tt *lock) { } +static constexpr inline void lockdebug_monitor_assert_unlocked(__unused monitor_tt *lock) {} extern void @@ -75,12 +77,12 @@ extern void lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock); static constexpr inline void -lockdebug_remember_recursive_mutex(recursive_mutex_tt *lock) { } +lockdebug_remember_recursive_mutex(__unused recursive_mutex_tt *lock) { } static constexpr inline void -lockdebug_recursive_mutex_lock(recursive_mutex_tt *lock) { } +lockdebug_recursive_mutex_lock(__unused recursive_mutex_tt *lock) { } static constexpr inline void -lockdebug_recursive_mutex_unlock(recursive_mutex_tt *lock) { } +lockdebug_recursive_mutex_unlock(__unused recursive_mutex_tt *lock) { } static constexpr inline void -lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt *lock) { } +lockdebug_recursive_mutex_assert_locked(__unused recursive_mutex_tt *lock) { } static constexpr inline void -lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock) { } +lockdebug_recursive_mutex_assert_unlocked(__unused recursive_mutex_tt *lock) { } diff --git a/runtime/objc-lockdebug.mm b/runtime/objc-lockdebug.mm index f182a27..1429c2d 100644 --- a/runtime/objc-lockdebug.mm +++ b/runtime/objc-lockdebug.mm @@ -321,10 +321,18 @@ lockdebug_assert_all_locks_locked() void lockdebug_assert_no_locks_locked() +{ + lockdebug_assert_no_locks_locked_except({}); +} + +void lockdebug_assert_no_locks_locked_except(std::initializer_list canBeLocked) { auto& owned = ownedLocks(); for (const auto& l : AllLocks()) { + if (std::find(canBeLocked.begin(), canBeLocked.end(), l.first) != canBeLocked.end()) + continue; + if (hasLock(owned, l.first, l.second.k)) { _objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k); } diff --git a/runtime/objc-object.h b/runtime/objc-object.h index 2c17c94..d15d5a8 100644 --- a/runtime/objc-object.h +++ 
b/runtime/objc-object.h @@ -73,7 +73,7 @@ objc_object::isClass() #if SUPPORT_TAGGED_POINTERS -inline Class +inline Class objc_object::getIsa() { if (fastpath(!isTaggedPointer())) return ISA(); @@ -103,6 +103,12 @@ objc_object::isTaggedPointer() return _objc_isTaggedPointer(this); } +inline bool +objc_object::isTaggedPointerOrNil() +{ + return _objc_isTaggedPointerOrNil(this); +} + inline bool objc_object::isBasicTaggedPointer() { @@ -121,8 +127,7 @@ objc_object::isExtTaggedPointer() #else // not SUPPORT_TAGGED_POINTERS - -inline Class +inline Class objc_object::getIsa() { return ISA(); @@ -141,6 +146,12 @@ objc_object::isTaggedPointer() return false; } +inline bool +objc_object::isTaggedPointerOrNil() +{ + return !this; +} + inline bool objc_object::isBasicTaggedPointer() { @@ -160,21 +171,118 @@ objc_object::isExtTaggedPointer() #if SUPPORT_NONPOINTER_ISA -inline Class -objc_object::ISA() +// Set the class field in an isa. Takes both the class to set and +// a pointer to the object where the isa will ultimately be used. +// This is necessary to get the pointer signing right. +// +// Note: this method does not support setting an indexed isa. When +// indexed isas are in use, it can only be used to set the class of a +// raw isa. +inline void +isa_t::setClass(Class newCls, UNUSED_WITHOUT_PTRAUTH objc_object *obj) { - ASSERT(!isTaggedPointer()); -#if SUPPORT_INDEXED_ISA - if (isa.nonpointer) { - uintptr_t slot = isa.indexcls; - return classForIndex((unsigned)slot); - } - return (Class)isa.bits; -#else - return (Class)(isa.bits & ISA_MASK); + // Match the conditional in isa.h. +#if __has_feature(ptrauth_calls) || TARGET_OS_SIMULATOR +# if ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_NONE + // No signing, just use the raw pointer. + uintptr_t signedCls = (uintptr_t)newCls; + +# elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ONLY_SWIFT + // We're only signing Swift classes. 
Non-Swift classes just use + // the raw pointer + uintptr_t signedCls = (uintptr_t)newCls; + if (newCls->isSwiftStable()) + signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR)); + +# elif ISA_SIGNING_SIGN_MODE == ISA_SIGNING_SIGN_ALL + // We're signing everything + uintptr_t signedCls = (uintptr_t)ptrauth_sign_unauthenticated((void *)newCls, ISA_SIGNING_KEY, ptrauth_blend_discriminator(obj, ISA_SIGNING_DISCRIMINATOR)); + +# else +# error Unknown isa signing mode. +# endif + + shiftcls_and_sig = signedCls >> 3; + +#elif SUPPORT_INDEXED_ISA + // Indexed isa only uses this method to set a raw pointer class. + // Setting an indexed class is handled separately. + cls = newCls; + +#else // Nonpointer isa, no ptrauth + shiftcls = (uintptr_t)newCls >> 3; #endif } +// Get the class pointer out of an isa. When ptrauth is supported, +// this operation is optionally authenticated. Many code paths don't +// need the authentication, so it can be skipped in those cases for +// better performance. +// +// Note: this method does not support retrieving indexed isas. When +// indexed isas are in use, it can only be used to retrieve the class +// of a raw isa. +#if SUPPORT_INDEXED_ISA || (ISA_SIGNING_AUTH_MODE != ISA_SIGNING_AUTH) +#define MAYBE_UNUSED_AUTHENTICATED_PARAM __attribute__((unused)) +#else +#define MAYBE_UNUSED_AUTHENTICATED_PARAM UNUSED_WITHOUT_PTRAUTH +#endif + +inline Class +isa_t::getClass(MAYBE_UNUSED_AUTHENTICATED_PARAM bool authenticated) { +#if SUPPORT_INDEXED_ISA + return cls; +#else + + uintptr_t clsbits = bits; + +# if __has_feature(ptrauth_calls) +# if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH + // Most callers aren't security critical, so skip the + // authentication unless they ask for it. Message sending and + // cache filling are protected by the auth code in msgSend. + if (authenticated) { + // Mask off all bits besides the class pointer and signature. 
+ clsbits &= ISA_MASK; + if (clsbits == 0) + return Nil; + clsbits = (uintptr_t)ptrauth_auth_data((void *)clsbits, ISA_SIGNING_KEY, ptrauth_blend_discriminator(this, ISA_SIGNING_DISCRIMINATOR)); + } else { + // If not authenticating, strip using the precomputed class mask. + clsbits &= objc_debug_isa_class_mask; + } +# else + // If not authenticating, strip using the precomputed class mask. + clsbits &= objc_debug_isa_class_mask; +# endif + +# else + clsbits &= ISA_MASK; +# endif + + return (Class)clsbits; +#endif +} + +inline Class +isa_t::getDecodedClass(bool authenticated) { +#if SUPPORT_INDEXED_ISA + if (nonpointer) { + return classForIndex(indexcls); + } + return (Class)cls; +#else + return getClass(authenticated); +#endif +} + +inline Class +objc_object::ISA(bool authenticated) +{ + ASSERT(!isTaggedPointer()); + return isa.getDecodedClass(authenticated); +} + inline Class objc_object::rawISA() { @@ -220,18 +328,25 @@ objc_object::initInstanceIsa(Class cls, bool hasCxxDtor) initIsa(cls, true, hasCxxDtor); } +#if !SUPPORT_INDEXED_ISA && !ISA_HAS_CXX_DTOR_BIT +#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT __attribute__((unused)) +#else +#define UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT +#endif + inline void -objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor) +objc_object::initIsa(Class cls, bool nonpointer, UNUSED_WITHOUT_INDEXED_ISA_AND_DTOR_BIT bool hasCxxDtor) { ASSERT(!isTaggedPointer()); + isa_t newisa(0); + if (!nonpointer) { - isa = isa_t((uintptr_t)cls); + newisa.setClass(cls, this); } else { ASSERT(!DisableNonpointerIsa); ASSERT(!cls->instancesRequireRawIsa()); - isa_t newisa(0); #if SUPPORT_INDEXED_ISA ASSERT(cls->classArrayIndex() > 0); @@ -244,18 +359,21 @@ objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor) newisa.bits = ISA_MAGIC_VALUE; // isa.magic is part of ISA_MAGIC_VALUE // isa.nonpointer is part of ISA_MAGIC_VALUE +# if ISA_HAS_CXX_DTOR_BIT newisa.has_cxx_dtor = hasCxxDtor; - newisa.shiftcls = (uintptr_t)cls >> 3; 
+# endif + newisa.setClass(cls, this); #endif - - // This write must be performed in a single store in some cases - // (for example when realizing a class because other threads - // may simultaneously try to use the class). - // fixme use atomics here to guarantee single-store and to - // guarantee memory order w.r.t. the class index table - // ...but not too atomic because we don't want to hurt instantiation - isa = newisa; + newisa.extra_rc = 1; } + + // This write must be performed in a single store in some cases + // (for example when realizing a class because other threads + // may simultaneously try to use the class). + // fixme use atomics here to guarantee single-store and to + // guarantee memory order w.r.t. the class index table + // ...but not too atomic because we don't want to hurt instantiation + isa = newisa; } @@ -270,34 +388,46 @@ objc_object::changeIsa(Class newCls) ASSERT(!isTaggedPointer()); isa_t oldisa; - isa_t newisa; + isa_t newisa(0); bool sideTableLocked = false; bool transcribeToSideTable = false; + oldisa = LoadExclusive(&isa.bits); + do { transcribeToSideTable = false; - oldisa = LoadExclusive(&isa.bits); if ((oldisa.bits == 0 || oldisa.nonpointer) && !newCls->isFuture() && newCls->canAllocNonpointer()) { // 0 -> nonpointer // nonpointer -> nonpointer #if SUPPORT_INDEXED_ISA - if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE; - else newisa = oldisa; + if (oldisa.bits == 0) { + newisa.bits = ISA_INDEX_MAGIC_VALUE; + newisa.extra_rc = 1; + } else { + newisa = oldisa; + } // isa.magic is part of ISA_MAGIC_VALUE // isa.nonpointer is part of ISA_MAGIC_VALUE newisa.has_cxx_dtor = newCls->hasCxxDtor(); ASSERT(newCls->classArrayIndex() > 0); newisa.indexcls = (uintptr_t)newCls->classArrayIndex(); #else - if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE; - else newisa = oldisa; + if (oldisa.bits == 0) { + newisa.bits = ISA_MAGIC_VALUE; + newisa.extra_rc = 1; + } + else { + newisa = oldisa; + } // isa.magic is part of ISA_MAGIC_VALUE 
// isa.nonpointer is part of ISA_MAGIC_VALUE +# if ISA_HAS_CXX_DTOR_BIT newisa.has_cxx_dtor = newCls->hasCxxDtor(); - newisa.shiftcls = (uintptr_t)newCls >> 3; +# endif + newisa.setClass(newCls, this); #endif } else if (oldisa.nonpointer) { @@ -308,38 +438,28 @@ objc_object::changeIsa(Class newCls) if (!sideTableLocked) sidetable_lock(); sideTableLocked = true; transcribeToSideTable = true; - newisa.cls = newCls; + newisa.setClass(newCls, this); } else { // raw pointer -> raw pointer - newisa.cls = newCls; + newisa.setClass(newCls, this); } - } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); + } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits))); if (transcribeToSideTable) { // Copy oldisa's retain count et al to side table. // oldisa.has_assoc: nothing to do // oldisa.has_cxx_dtor: nothing to do sidetable_moveExtraRC_nolock(oldisa.extra_rc, - oldisa.deallocating, + oldisa.isDeallocating(), oldisa.weakly_referenced); } if (sideTableLocked) sidetable_unlock(); - if (oldisa.nonpointer) { -#if SUPPORT_INDEXED_ISA - return classForIndex(oldisa.indexcls); -#else - return (Class)((uintptr_t)oldisa.shiftcls << 3); -#endif - } - else { - return oldisa.cls; - } + return oldisa.getDecodedClass(false); } - inline bool objc_object::hasAssociatedObjects() { @@ -354,15 +474,22 @@ objc_object::setHasAssociatedObjects() { if (isTaggedPointer()) return; - retry: - isa_t oldisa = LoadExclusive(&isa.bits); - isa_t newisa = oldisa; - if (!newisa.nonpointer || newisa.has_assoc) { - ClearExclusive(&isa.bits); - return; + if (slowpath(!hasNonpointerIsa() && ISA()->hasCustomRR()) && !ISA()->isFuture() && !ISA()->isMetaClass()) { + void(*setAssoc)(id, SEL) = (void(*)(id, SEL)) object_getMethodImplementation((id)this, @selector(_noteAssociatedObjects)); + if ((IMP)setAssoc != _objc_msgForward) { + (*setAssoc)((id)this, @selector(_noteAssociatedObjects)); + } } - newisa.has_assoc = true; - if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto 
retry; + + isa_t newisa, oldisa = LoadExclusive(&isa.bits); + do { + newisa = oldisa; + if (!newisa.nonpointer || newisa.has_assoc) { + ClearExclusive(&isa.bits); + return; + } + newisa.has_assoc = true; + } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits))); } @@ -378,20 +505,20 @@ objc_object::isWeaklyReferenced() inline void objc_object::setWeaklyReferenced_nolock() { - retry: - isa_t oldisa = LoadExclusive(&isa.bits); - isa_t newisa = oldisa; - if (slowpath(!newisa.nonpointer)) { - ClearExclusive(&isa.bits); - sidetable_setWeaklyReferenced_nolock(); - return; - } - if (newisa.weakly_referenced) { - ClearExclusive(&isa.bits); - return; - } - newisa.weakly_referenced = true; - if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; + isa_t newisa, oldisa = LoadExclusive(&isa.bits); + do { + newisa = oldisa; + if (slowpath(!newisa.nonpointer)) { + ClearExclusive(&isa.bits); + sidetable_setWeaklyReferenced_nolock(); + return; + } + if (newisa.weakly_referenced) { + ClearExclusive(&isa.bits); + return; + } + newisa.weakly_referenced = true; + } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits))); } @@ -399,8 +526,12 @@ inline bool objc_object::hasCxxDtor() { ASSERT(!isTaggedPointer()); - if (isa.nonpointer) return isa.has_cxx_dtor; - else return isa.cls->hasCxxDtor(); +#if ISA_HAS_CXX_DTOR_BIT + if (isa.nonpointer) + return isa.has_cxx_dtor; + else +#endif + return ISA()->hasCxxDtor(); } @@ -409,7 +540,7 @@ inline bool objc_object::rootIsDeallocating() { if (isTaggedPointer()) return false; - if (isa.nonpointer) return isa.deallocating; + if (isa.nonpointer) return isa.isDeallocating(); return sidetable_isDeallocating(); } @@ -435,10 +566,14 @@ objc_object::rootDealloc() { if (isTaggedPointer()) return; // fixme necessary? 
- if (fastpath(isa.nonpointer && - !isa.weakly_referenced && - !isa.has_assoc && - !isa.has_cxx_dtor && + if (fastpath(isa.nonpointer && + !isa.weakly_referenced && + !isa.has_assoc && +#if ISA_HAS_CXX_DTOR_BIT + !isa.has_cxx_dtor && +#else + !isa.getClass(false)->hasCxxDtor() && +#endif !isa.has_sidetable_rc)) { assert(!sidetable_present()); @@ -449,6 +584,8 @@ objc_object::rootDealloc() } } +extern explicit_atomic swiftRetain; +extern explicit_atomic swiftRelease; // Equivalent to calling [this retain], with shortcuts if there is no override inline id @@ -456,14 +593,9 @@ objc_object::retain() { ASSERT(!isTaggedPointer()); - if (fastpath(!ISA()->hasCustomRR())) { - return rootRetain(); - } - - return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain)); + return rootRetain(false, RRVariant::FastOrMsgSend); } - // Base retain implementation, ignoring overrides. // This does not check isa.fast_rr; if there is an RR override then // it was already called and it chose to call [super retain]. @@ -476,19 +608,19 @@ objc_object::retain() ALWAYS_INLINE id objc_object::rootRetain() { - return rootRetain(false, false); + return rootRetain(false, RRVariant::Fast); } ALWAYS_INLINE bool objc_object::rootTryRetain() { - return rootRetain(true, false) ? true : false; + return rootRetain(true, RRVariant::Fast) ? true : false; } -ALWAYS_INLINE id -objc_object::rootRetain(bool tryRetain, bool handleOverflow) +ALWAYS_INLINE id +objc_object::rootRetain(bool tryRetain, objc_object::RRVariant variant) { - if (isTaggedPointer()) return (id)this; + if (slowpath(isTaggedPointer())) return (id)this; bool sideTableLocked = false; bool transcribeToSideTable = false; @@ -496,29 +628,56 @@ objc_object::rootRetain(bool tryRetain, bool handleOverflow) isa_t oldisa; isa_t newisa; + oldisa = LoadExclusive(&isa.bits); + + if (variant == RRVariant::FastOrMsgSend) { + // These checks are only meaningful for objc_retain() + // They are here so that we avoid a re-load of the isa. 
+ if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) { + ClearExclusive(&isa.bits); + if (oldisa.getDecodedClass(false)->canCallSwiftRR()) { + return swiftRetain.load(memory_order_relaxed)((id)this); + } + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, @selector(retain)); + } + } + + if (slowpath(!oldisa.nonpointer)) { + // a Class is a Class forever, so we can perform this check once + // outside of the CAS loop + if (oldisa.getDecodedClass(false)->isMetaClass()) { + ClearExclusive(&isa.bits); + return (id)this; + } + } + do { transcribeToSideTable = false; - oldisa = LoadExclusive(&isa.bits); newisa = oldisa; if (slowpath(!newisa.nonpointer)) { ClearExclusive(&isa.bits); - if (rawISA()->isMetaClass()) return (id)this; - if (!tryRetain && sideTableLocked) sidetable_unlock(); if (tryRetain) return sidetable_tryRetain() ? (id)this : nil; - else return sidetable_retain(); + else return sidetable_retain(sideTableLocked); } // don't check newisa.fast_rr; we already called any RR overrides - if (slowpath(tryRetain && newisa.deallocating)) { + if (slowpath(newisa.isDeallocating())) { ClearExclusive(&isa.bits); - if (!tryRetain && sideTableLocked) sidetable_unlock(); - return nil; + if (sideTableLocked) { + ASSERT(variant == RRVariant::Full); + sidetable_unlock(); + } + if (slowpath(tryRetain)) { + return nil; + } else { + return (id)this; + } } uintptr_t carry; newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++ if (slowpath(carry)) { // newisa.extra_rc++ overflowed - if (!handleOverflow) { + if (variant != RRVariant::Full) { ClearExclusive(&isa.bits); return rootRetain_overflow(tryRetain); } @@ -530,14 +689,20 @@ objc_object::rootRetain(bool tryRetain, bool handleOverflow) newisa.extra_rc = RC_HALF; newisa.has_sidetable_rc = true; } - } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits))); + } while (slowpath(!StoreExclusive(&isa.bits, &oldisa.bits, newisa.bits))); - if (slowpath(transcribeToSideTable)) { - // Copy the 
other half of the retain counts to the side table. - sidetable_addExtraRC_nolock(RC_HALF); + if (variant == RRVariant::Full) { + if (slowpath(transcribeToSideTable)) { + // Copy the other half of the retain counts to the side table. + sidetable_addExtraRC_nolock(RC_HALF); + } + + if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock(); + } else { + ASSERT(!transcribeToSideTable); + ASSERT(!sideTableLocked); } - if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock(); return (id)this; } @@ -548,12 +713,7 @@ objc_object::release() { ASSERT(!isTaggedPointer()); - if (fastpath(!ISA()->hasCustomRR())) { - rootRelease(); - return; - } - - ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release)); + rootRelease(true, RRVariant::FastOrMsgSend); } @@ -570,35 +730,65 @@ objc_object::release() ALWAYS_INLINE bool objc_object::rootRelease() { - return rootRelease(true, false); + return rootRelease(true, RRVariant::Fast); } ALWAYS_INLINE bool objc_object::rootReleaseShouldDealloc() { - return rootRelease(false, false); + return rootRelease(false, RRVariant::Fast); } -ALWAYS_INLINE bool -objc_object::rootRelease(bool performDealloc, bool handleUnderflow) +ALWAYS_INLINE bool +objc_object::rootRelease(bool performDealloc, objc_object::RRVariant variant) { - if (isTaggedPointer()) return false; + if (slowpath(isTaggedPointer())) return false; bool sideTableLocked = false; - isa_t oldisa; - isa_t newisa; + isa_t newisa, oldisa; - retry: + oldisa = LoadExclusive(&isa.bits); + + if (variant == RRVariant::FastOrMsgSend) { + // These checks are only meaningful for objc_release() + // They are here so that we avoid a re-load of the isa. 
+ if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) { + ClearExclusive(&isa.bits); + if (oldisa.getDecodedClass(false)->canCallSwiftRR()) { + swiftRelease.load(memory_order_relaxed)((id)this); + return true; + } + ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release)); + return true; + } + } + + if (slowpath(!oldisa.nonpointer)) { + // a Class is a Class forever, so we can perform this check once + // outside of the CAS loop + if (oldisa.getDecodedClass(false)->isMetaClass()) { + ClearExclusive(&isa.bits); + return false; + } + } + +retry: do { - oldisa = LoadExclusive(&isa.bits); newisa = oldisa; if (slowpath(!newisa.nonpointer)) { ClearExclusive(&isa.bits); - if (rawISA()->isMetaClass()) return false; - if (sideTableLocked) sidetable_unlock(); - return sidetable_release(performDealloc); + return sidetable_release(sideTableLocked, performDealloc); } + if (slowpath(newisa.isDeallocating())) { + ClearExclusive(&isa.bits); + if (sideTableLocked) { + ASSERT(variant == RRVariant::Full); + sidetable_unlock(); + } + return false; + } + // don't check newisa.fast_rr; we already called any RR overrides uintptr_t carry; newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc-- @@ -606,10 +796,16 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow) // don't ClearExclusive() goto underflow; } - } while (slowpath(!StoreReleaseExclusive(&isa.bits, - oldisa.bits, newisa.bits))); + } while (slowpath(!StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits))); - if (slowpath(sideTableLocked)) sidetable_unlock(); + if (slowpath(newisa.isDeallocating())) + goto deallocate; + + if (variant == RRVariant::Full) { + if (slowpath(sideTableLocked)) sidetable_unlock(); + } else { + ASSERT(!sideTableLocked); + } return false; underflow: @@ -619,7 +815,7 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow) newisa = oldisa; if (slowpath(newisa.has_sidetable_rc)) { - if (!handleUnderflow) { + if (variant != 
RRVariant::Full) { ClearExclusive(&isa.bits); return rootRelease_underflow(performDealloc); } @@ -632,35 +828,37 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow) sideTableLocked = true; // Need to start over to avoid a race against // the nonpointer -> raw pointer transition. + oldisa = LoadExclusive(&isa.bits); goto retry; } // Try to remove some retain counts from the side table. - size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF); + auto borrow = sidetable_subExtraRC_nolock(RC_HALF); - // To avoid races, has_sidetable_rc must remain set - // even if the side table count is now zero. + bool emptySideTable = borrow.remaining == 0; // we'll clear the side table if no refcounts remain there - if (borrowed > 0) { + if (borrow.borrowed > 0) { // Side table retain count decreased. // Try to add them to the inline count. - newisa.extra_rc = borrowed - 1; // redo the original decrement too - bool stored = StoreReleaseExclusive(&isa.bits, - oldisa.bits, newisa.bits); - if (!stored) { + bool didTransitionToDeallocating = false; + newisa.extra_rc = borrow.borrowed - 1; // redo the original decrement too + newisa.has_sidetable_rc = !emptySideTable; + + bool stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits); + + if (!stored && oldisa.nonpointer) { // Inline update failed. // Try it again right now. This prevents livelock on LL/SC // architectures where the side table access itself may have // dropped the reservation. 
- isa_t oldisa2 = LoadExclusive(&isa.bits); - isa_t newisa2 = oldisa2; - if (newisa2.nonpointer) { - uintptr_t overflow; - newisa2.bits = - addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow); - if (!overflow) { - stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, - newisa2.bits); + uintptr_t overflow; + newisa.bits = + addc(oldisa.bits, RC_ONE * (borrow.borrowed-1), 0, &overflow); + newisa.has_sidetable_rc = !emptySideTable; + if (!overflow) { + stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits); + if (stored) { + didTransitionToDeallocating = newisa.isDeallocating(); } } } @@ -668,32 +866,31 @@ objc_object::rootRelease(bool performDealloc, bool handleUnderflow) if (!stored) { // Inline update failed. // Put the retains back in the side table. - sidetable_addExtraRC_nolock(borrowed); + ClearExclusive(&isa.bits); + sidetable_addExtraRC_nolock(borrow.borrowed); + oldisa = LoadExclusive(&isa.bits); goto retry; } // Decrement successful after borrowing from side table. - // This decrement cannot be the deallocating decrement - the side - // table lock and has_sidetable_rc bit ensure that if everyone - // else tried to -release while we worked, the last one would block. - sidetable_unlock(); - return false; + if (emptySideTable) + sidetable_clearExtraRC_nolock(); + + if (!didTransitionToDeallocating) { + if (slowpath(sideTableLocked)) sidetable_unlock(); + return false; + } } else { // Side table is empty after all. Fall-through to the dealloc path. } } +deallocate: // Really deallocate. 
- if (slowpath(newisa.deallocating)) { - ClearExclusive(&isa.bits); - if (sideTableLocked) sidetable_unlock(); - return overrelease_error(); - // does not actually return - } - newisa.deallocating = true; - if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; + ASSERT(newisa.isDeallocating()); + ASSERT(isa.isDeallocating()); if (slowpath(sideTableLocked)) sidetable_unlock(); @@ -736,10 +933,9 @@ objc_object::rootRetainCount() if (isTaggedPointer()) return (uintptr_t)this; sidetable_lock(); - isa_t bits = LoadExclusive(&isa.bits); - ClearExclusive(&isa.bits); + isa_t bits = __c11_atomic_load((_Atomic uintptr_t *)&isa.bits, __ATOMIC_RELAXED); if (bits.nonpointer) { - uintptr_t rc = 1 + bits.extra_rc; + uintptr_t rc = bits.extra_rc; if (bits.has_sidetable_rc) { rc += sidetable_getExtraRC_nolock(); } @@ -756,12 +952,29 @@ objc_object::rootRetainCount() #else // not SUPPORT_NONPOINTER_ISA +inline void +isa_t::setClass(Class cls, objc_object *obj) +{ + this->cls = cls; +} + +inline Class +isa_t::getClass(bool authenticated __unused) +{ + return cls; +} + +inline Class +isa_t::getDecodedClass(bool authenticated) +{ + return getClass(authenticated); +} inline Class -objc_object::ISA() +objc_object::ISA(bool authenticated __unused) { ASSERT(!isTaggedPointer()); - return isa.cls; + return isa.getClass(/*authenticated*/false); } inline Class @@ -781,7 +994,7 @@ inline void objc_object::initIsa(Class cls) { ASSERT(!isTaggedPointer()); - isa = (uintptr_t)cls; + isa.setClass(cls, this); } @@ -822,18 +1035,17 @@ objc_object::changeIsa(Class cls) // cls->isInitializing() || cls->isInitialized()); ASSERT(!isTaggedPointer()); - - isa_t oldisa, newisa; - newisa.cls = cls; - do { - oldisa = LoadExclusive(&isa.bits); - } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); - - if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) { + + isa_t newisa, oldisa; + newisa.setClass(cls, this); + oldisa.bits = __c11_atomic_exchange((_Atomic uintptr_t 
*)&isa.bits, newisa.bits, __ATOMIC_RELAXED); + + Class oldcls = oldisa.getDecodedClass(/*authenticated*/false); + if (oldcls && oldcls->instancesHaveAssociatedObjects()) { cls->setInstancesHaveAssociatedObjects(); } - - return oldisa.cls; + + return oldcls; } @@ -873,7 +1085,7 @@ inline bool objc_object::hasCxxDtor() { ASSERT(!isTaggedPointer()); - return isa.cls->hasCxxDtor(); + return isa.getClass(/*authenticated*/false)->hasCxxDtor(); } @@ -949,14 +1161,14 @@ inline bool objc_object::rootRelease() { if (isTaggedPointer()) return false; - return sidetable_release(true); + return sidetable_release(); } inline bool objc_object::rootReleaseShouldDealloc() { if (isTaggedPointer()) return false; - return sidetable_release(false); + return sidetable_release(/*locked*/false, /*performDealloc*/false); } diff --git a/runtime/objc-opt.mm b/runtime/objc-opt.mm index f4afb20..44abbdf 100644 --- a/runtime/objc-opt.mm +++ b/runtime/objc-opt.mm @@ -27,13 +27,13 @@ */ #include "objc-private.h" +#include "objc-os.h" +#include "objc-file.h" #if !SUPPORT_PREOPT // Preoptimization not supported on this platform. 
-struct objc_selopt_t; - bool isPreoptimized(void) { return false; @@ -64,16 +64,6 @@ bool header_info::hasPreoptimizedProtocols() const return false; } -objc_selopt_t *preoptimizedSelectors(void) -{ - return nil; -} - -bool sharedCacheSupportsProtocolRoots(void) -{ - return false; -} - Protocol *getPreoptimizedProtocol(const char *name) { return nil; @@ -123,7 +113,6 @@ void preopt_init(void) #include using objc_opt::objc_stringhash_offset_t; -using objc_opt::objc_protocolopt_t; using objc_opt::objc_protocolopt2_t; using objc_opt::objc_clsopt_t; using objc_opt::objc_headeropt_ro_t; @@ -141,6 +130,62 @@ static bool preoptimized; extern const objc_opt_t _objc_opt_data; // in __TEXT, __objc_opt_ro +namespace objc_opt { +struct objc_headeropt_ro_t { + uint32_t count; + uint32_t entsize; + header_info headers[0]; // sorted by mhdr address + + header_info& getOrEnd(uint32_t i) const { + ASSERT(i <= count); + return *(header_info *)((uint8_t *)&headers + (i * entsize)); + } + + header_info& get(uint32_t i) const { + ASSERT(i < count); + return *(header_info *)((uint8_t *)&headers + (i * entsize)); + } + + uint32_t index(const header_info* hi) const { + const header_info* begin = &get(0); + const header_info* end = &getOrEnd(count); + ASSERT(hi >= begin && hi < end); + return (uint32_t)(((uintptr_t)hi - (uintptr_t)begin) / entsize); + } + + header_info *get(const headerType *mhdr) + { + int32_t start = 0; + int32_t end = count; + while (start <= end) { + int32_t i = (start+end)/2; + header_info &hi = get(i); + if (mhdr == hi.mhdr()) return &hi; + else if (mhdr < hi.mhdr()) end = i-1; + else start = i+1; + } + +#if DEBUG + for (uint32_t i = 0; i < count; i++) { + header_info &hi = get(i); + if (mhdr == hi.mhdr()) { + _objc_fatal("failed to find header %p (%d/%d)", + mhdr, i, count); + } + } +#endif + + return nil; + } +}; + +struct objc_headeropt_rw_t { + uint32_t count; + uint32_t entsize; + header_info_rw headers[0]; // sorted by mhdr address +}; +}; + 
/*********************************************************************** * Return YES if we have a valid optimized shared cache. **********************************************************************/ @@ -199,38 +244,114 @@ bool header_info::hasPreoptimizedProtocols() const return info()->optimizedByDyld() || info()->optimizedByDyldClosure(); } - -objc_selopt_t *preoptimizedSelectors(void) +bool header_info::hasPreoptimizedSectionLookups() const { - return opt ? opt->selopt() : nil; + objc_opt::objc_headeropt_ro_t *hinfoRO = opt->headeropt_ro(); + if (hinfoRO->entsize == (2 * sizeof(intptr_t))) + return NO; + + return YES; } -bool sharedCacheSupportsProtocolRoots(void) +const classref_t *header_info::nlclslist(size_t *outCount) const { - return (opt != nil) && (opt->protocolopt2() != nil); +#if __OBJC2__ + // This field is new, so temporarily be resilient to the shared cache + // not generating it + if (isPreoptimized() && hasPreoptimizedSectionLookups()) { + *outCount = nlclslist_count; + const classref_t *list = (const classref_t *)(((intptr_t)&nlclslist_offset) + nlclslist_offset); + #if DEBUG + size_t debugCount; + assert((list == _getObjc2NonlazyClassList(mhdr(), &debugCount)) && (*outCount == debugCount)); + #endif + return list; + } + return _getObjc2NonlazyClassList(mhdr(), outCount); +#else + return NULL; +#endif +} + +category_t * const *header_info::nlcatlist(size_t *outCount) const +{ +#if __OBJC2__ + // This field is new, so temporarily be resilient to the shared cache + // not generating it + if (isPreoptimized() && hasPreoptimizedSectionLookups()) { + *outCount = nlcatlist_count; + category_t * const *list = (category_t * const *)(((intptr_t)&nlcatlist_offset) + nlcatlist_offset); + #if DEBUG + size_t debugCount; + assert((list == _getObjc2NonlazyCategoryList(mhdr(), &debugCount)) && (*outCount == debugCount)); + #endif + return list; + } + return _getObjc2NonlazyCategoryList(mhdr(), outCount); +#else + return NULL; +#endif +} + +category_t * const 
*header_info::catlist(size_t *outCount) const +{ +#if __OBJC2__ + // This field is new, so temporarily be resilient to the shared cache + // not generating it + if (isPreoptimized() && hasPreoptimizedSectionLookups()) { + *outCount = catlist_count; + category_t * const *list = (category_t * const *)(((intptr_t)&catlist_offset) + catlist_offset); + #if DEBUG + size_t debugCount; + assert((list == _getObjc2CategoryList(mhdr(), &debugCount)) && (*outCount == debugCount)); + #endif + return list; + } + return _getObjc2CategoryList(mhdr(), outCount); +#else + return NULL; +#endif +} + +category_t * const *header_info::catlist2(size_t *outCount) const +{ +#if __OBJC2__ + // This field is new, so temporarily be resilient to the shared cache + // not generating it + if (isPreoptimized() && hasPreoptimizedSectionLookups()) { + *outCount = catlist2_count; + category_t * const *list = (category_t * const *)(((intptr_t)&catlist2_offset) + catlist2_offset); + #if DEBUG + size_t debugCount; + assert((list == _getObjc2CategoryList2(mhdr(), &debugCount)) && (*outCount == debugCount)); + #endif + return list; + } + return _getObjc2CategoryList2(mhdr(), outCount); +#else + return NULL; +#endif } Protocol *getSharedCachePreoptimizedProtocol(const char *name) { - // Look in the new table if we have it - if (objc_protocolopt2_t *protocols2 = opt ? opt->protocolopt2() : nil) { - // Note, we have to pass the lambda directly here as otherwise we would try - // message copy and autorelease. - return (Protocol *)protocols2->getProtocol(name, [](const void* hi) -> bool { - return ((header_info *)hi)->isLoaded(); - }); - } - - objc_protocolopt_t *protocols = opt ? opt->protocolopt() : nil; + objc_protocolopt2_t *protocols = opt ? opt->protocolopt2() : nil; if (!protocols) return nil; - return (Protocol *)protocols->getProtocol(name); + // Note, we have to pass the lambda directly here as otherwise we would try + // message copy and autorelease. 
+ return (Protocol *)protocols->getProtocol(name, [](const void* hi) -> bool { + return ((header_info *)hi)->isLoaded(); + }); } Protocol *getPreoptimizedProtocol(const char *name) { + objc_protocolopt2_t *protocols = opt ? opt->protocolopt2() : nil; + if (!protocols) return nil; + // Try table from dyld closure first. It was built to ignore the dupes it // knows will come from the cache, so anything left in here was there when // we launched @@ -354,47 +475,6 @@ Class* copyPreoptimizedClasses(const char *name, int *outCount) return nil; } -namespace objc_opt { -struct objc_headeropt_ro_t { - uint32_t count; - uint32_t entsize; - header_info headers[0]; // sorted by mhdr address - - header_info *get(const headerType *mhdr) - { - ASSERT(entsize == sizeof(header_info)); - - int32_t start = 0; - int32_t end = count; - while (start <= end) { - int32_t i = (start+end)/2; - header_info *hi = headers+i; - if (mhdr == hi->mhdr()) return hi; - else if (mhdr < hi->mhdr()) end = i-1; - else start = i+1; - } - -#if DEBUG - for (uint32_t i = 0; i < count; i++) { - header_info *hi = headers+i; - if (mhdr == hi->mhdr()) { - _objc_fatal("failed to find header %p (%d/%d)", - mhdr, i, count); - } - } -#endif - - return nil; - } -}; - -struct objc_headeropt_rw_t { - uint32_t count; - uint32_t entsize; - header_info_rw headers[0]; // sorted by mhdr address -}; -}; - header_info *preoptimizedHinfoForHeader(const headerType *mhdr) { @@ -422,7 +502,7 @@ header_info_rw *getPreoptimizedHeaderRW(const struct header_info *const hdr) _objc_fatal("preoptimized header_info missing for %s (%p %p %p)", hdr->fname(), hdr, hinfoRO, hinfoRW); } - int32_t index = (int32_t)(hdr - hinfoRO->headers); + int32_t index = hinfoRO->index(hdr); ASSERT(hinfoRW->entsize == sizeof(header_info_rw)); return &hinfoRW->headers[index]; } @@ -435,7 +515,7 @@ void preopt_init(void) const uintptr_t start = (uintptr_t)_dyld_get_shared_cache_range(&length); if (start) { - objc::dataSegmentsRanges.add(start, start + 
length); + objc::dataSegmentsRanges.setSharedCacheRange(start, start + length); } // `opt` not set at compile time in order to detect too-early usage diff --git a/runtime/objc-os.h b/runtime/objc-os.h index c28ba05..6e38e0e 100644 --- a/runtime/objc-os.h +++ b/runtime/objc-os.h @@ -93,6 +93,16 @@ struct explicit_atomic : public std::atomic { } }; +namespace objc { +static inline uintptr_t mask16ShiftBits(uint16_t mask) +{ + // returns by how much 0xffff must be shifted "right" to return mask + uintptr_t maskShift = __builtin_clz(mask) - 16; + ASSERT((0xffff >> maskShift) == mask); + return maskShift; +} +} + #if TARGET_OS_MAC # define OS_UNFAIR_LOCK_INLINE 1 @@ -175,17 +185,25 @@ LoadExclusive(uintptr_t *src) static ALWAYS_INLINE bool -StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value) +StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value) { - return !__builtin_arm_strex(value, dst); + if (slowpath(__builtin_arm_strex(value, dst))) { + *oldvalue = LoadExclusive(dst); + return false; + } + return true; } static ALWAYS_INLINE bool -StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value) +StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value) { - return !__builtin_arm_stlex(value, dst); + if (slowpath(__builtin_arm_stlex(value, dst))) { + *oldvalue = LoadExclusive(dst); + return false; + } + return true; } static ALWAYS_INLINE @@ -206,17 +224,17 @@ LoadExclusive(uintptr_t *src) static ALWAYS_INLINE bool -StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +StoreExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value) { - return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED); + return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } static ALWAYS_INLINE bool -StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, 
uintptr_t value) +StoreReleaseExclusive(uintptr_t *dst, uintptr_t *oldvalue, uintptr_t value) { - return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, &oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED); + return __c11_atomic_compare_exchange_weak((_Atomic(uintptr_t) *)dst, oldvalue, value, __ATOMIC_RELEASE, __ATOMIC_RELAXED); } static ALWAYS_INLINE @@ -726,7 +744,7 @@ class mutex_tt : nocopy_t { lockdebug_remember_mutex(this); } - constexpr mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { } + constexpr mutex_tt(__unused const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { } void lock() { lockdebug_mutex_lock(this); @@ -762,7 +780,7 @@ class mutex_tt : nocopy_t { // Address-ordered lock discipline for a pair of locks. static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) { - if (lock1 < lock2) { + if ((uintptr_t)lock1 < (uintptr_t)lock2) { lock1->lock(); lock2->lock(); } else { @@ -812,7 +830,7 @@ class recursive_mutex_tt : nocopy_t { lockdebug_remember_recursive_mutex(this); } - constexpr recursive_mutex_tt(const fork_unsafe_lock_t unsafe) + constexpr recursive_mutex_tt(__unused const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_RECURSIVE_LOCK_INIT) { } @@ -877,7 +895,7 @@ class monitor_tt { lockdebug_remember_monitor(this); } - monitor_tt(const fork_unsafe_lock_t unsafe) + monitor_tt(__unused const fork_unsafe_lock_t unsafe) : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { } @@ -1019,63 +1037,20 @@ ustrdupMaybeNil(const uint8_t *str) // OS version checking: // -// sdkVersion() -// DYLD_OS_VERSION(mac, ios, tv, watch, bridge) -// sdkIsOlderThan(mac, ios, tv, watch, bridge) // sdkIsAtLeast(mac, ios, tv, watch, bridge) -// +// // This version order matches OBJC_AVAILABLE. 
- -#if TARGET_OS_OSX -# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_MACOSX_VERSION_##x -# define sdkVersion() dyld_get_program_sdk_version() - -#elif TARGET_OS_IOS -# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##i -# define sdkVersion() dyld_get_program_sdk_version() - -#elif TARGET_OS_TV - // dyld does not currently have distinct constants for tvOS -# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t -# define sdkVersion() dyld_get_program_sdk_version() - -#elif TARGET_OS_BRIDGE -# if TARGET_OS_WATCH -# error bridgeOS 1.0 not supported -# endif - // fixme don't need bridgeOS versioning yet -# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_IOS_VERSION_##t -# define sdkVersion() dyld_get_program_sdk_bridge_os_version() - -#elif TARGET_OS_WATCH -# define DYLD_OS_VERSION(x, i, t, w, b) DYLD_WATCHOS_VERSION_##w - // watchOS has its own API for compatibility reasons -# define sdkVersion() dyld_get_program_sdk_watch_os_version() - -#else -# error unknown OS -#endif +// +// NOTE: prefer dyld_program_sdk_at_least when possible +#define sdkIsAtLeast(x, i, t, w, b) \ + (dyld_program_sdk_at_least(dyld_platform_version_macOS_ ## x) || \ + dyld_program_sdk_at_least(dyld_platform_version_iOS_ ## i) || \ + dyld_program_sdk_at_least(dyld_platform_version_tvOS_ ## t) || \ + dyld_program_sdk_at_least(dyld_platform_version_watchOS_ ## w) || \ + dyld_program_sdk_at_least(dyld_platform_version_bridgeOS_ ## b)) -#define sdkIsOlderThan(x, i, t, w, b) \ - (sdkVersion() < DYLD_OS_VERSION(x, i, t, w, b)) -#define sdkIsAtLeast(x, i, t, w, b) \ - (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w, b)) - -// Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan() -#define DYLD_MACOSX_VERSION_0 0 -#define DYLD_IOS_VERSION_0 0 -#define DYLD_TVOS_VERSION_0 0 -#define DYLD_WATCHOS_VERSION_0 0 -#define DYLD_BRIDGEOS_VERSION_0 0 - -// Pretty-print a DYLD_*_VERSION_* constant. 
-#define SDK_FORMAT "%hu.%hhu.%hhu" -#define FORMAT_SDK(v) \ - (unsigned short)(((uint32_t)(v))>>16), \ - (unsigned char)(((uint32_t)(v))>>8), \ - (unsigned char)(((uint32_t)(v))>>0) - +#ifndef __BUILDING_OBJCDT__ // fork() safety requires careful tracking of all locks. // Our custom lock types check this in debug builds. // Disallow direct use of all other lock types. @@ -1083,6 +1058,6 @@ typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE; typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE; typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE; typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE; - +#endif #endif diff --git a/runtime/objc-os.mm b/runtime/objc-os.mm index 7d600ef..39cf2db 100644 --- a/runtime/objc-os.mm +++ b/runtime/objc-os.mm @@ -28,7 +28,7 @@ #include "objc-private.h" #include "objc-loadmethod.h" -#include "objc-cache.h" +#include "objc-bp-assist.h" #if TARGET_OS_WIN32 @@ -492,11 +492,16 @@ map_images_nolock(unsigned mhCount, const char * const mhPaths[], if (mhdr->filetype == MH_EXECUTE) { // Size some data structures based on main executable's size #if __OBJC2__ - size_t count; - _getObjc2SelectorRefs(hi, &count); - selrefCount += count; - _getObjc2MessageRefs(hi, &count); - selrefCount += count; + // If dyld3 optimized the main executable, then there shouldn't + // be any selrefs needed in the dynamic map so we can just init + // to a 0 sized map + if ( !hi->hasPreoptimizedSelectors() ) { + size_t count; + _getObjc2SelectorRefs(hi, &count); + selrefCount += count; + _getObjc2MessageRefs(hi, &count); + selrefCount += count; + } #else _getObjcSelectorRefs(hi, &selrefCount); #endif @@ -559,13 +564,12 @@ map_images_nolock(unsigned mhCount, const char * const mhPaths[], // Disable +initialize fork safety if the app has a // __DATA,__objc_fork_ok section. 
- if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_13) { + if (!dyld_program_sdk_at_least(dyld_platform_version_macOS_10_13)) { DisableInitializeForkSafety = true; if (PrintInitializing) { _objc_inform("INITIALIZE: disabling +initialize fork " "safety enforcement because the app is " - "too old (SDK version " SDK_FORMAT ")", - FORMAT_SDK(dyld_get_program_sdk_version())); + "too old."); } } @@ -657,6 +661,11 @@ static void static_init() for (size_t i = 0; i < count; i++) { inits[i](); } + auto offsets = getLibobjcInitializerOffsets(&_mh_dylib_header, &count); + for (size_t i = 0; i < count; i++) { + UnsignedInitializer init(offsets[i]); + init(); + } } @@ -922,7 +931,9 @@ void _objc_init(void) static_init(); runtime_init(); exception_init(); - cache_init(); +#if __OBJC2__ + cache_t::init(); +#endif _imp_implementationWithBlock_init(); _dyld_objc_notify_register(&map_images, load_images, unmap_image); diff --git a/runtime/objc-private.h b/runtime/objc-private.h index 4d7aab2..c801ba0 100644 --- a/runtime/objc-private.h +++ b/runtime/objc-private.h @@ -53,11 +53,23 @@ #define ASSERT(x) assert(x) #endif +// `this` is never NULL in C++ unless we encounter UB, but checking for what's impossible +// is the point of these asserts, so disable the corresponding warning, and let's hope +// we will reach the assert despite the UB +#define ASSERT_THIS_NOT_NULL \
_Pragma("clang diagnostic push") \
+_Pragma("clang diagnostic ignored \"-Wundefined-bool-conversion\"") \
+ASSERT(this) \
+_Pragma("clang diagnostic pop") + + struct objc_class; struct objc_object; +struct category_t; typedef struct objc_class *Class; typedef struct objc_object *id; +typedef struct classref *classref_t; namespace { struct SideTable; @@ -69,13 +81,32 @@ union isa_t { isa_t() { } isa_t(uintptr_t value) : bits(value) { } - Class cls; uintptr_t bits; + +private: + // Accessing the class requires custom ptrauth operations, so + // force clients to go through setClass/getClass by making this + // 
private. + Class cls; + +public: #if defined(ISA_BITFIELD) struct { ISA_BITFIELD; // defined in isa.h }; + + bool isDeallocating() { + return extra_rc == 0 && has_sidetable_rc == 0; + } + void setDeallocating() { + extra_rc = 0; + has_sidetable_rc = 0; + } #endif + + void setClass(Class cls, objc_object *obj); + Class getClass(bool authenticated); + Class getDecodedClass(bool authenticated); }; @@ -86,7 +117,7 @@ private: public: // ISA() assumes this is NOT a tagged pointer object - Class ISA(); + Class ISA(bool authenticated = false); // rawISA() assumes this is NOT a tagged pointer object or a non pointer ISA Class rawISA(); @@ -113,6 +144,7 @@ public: bool hasNonpointerIsa(); bool isTaggedPointer(); + bool isTaggedPointerOrNil(); bool isBasicTaggedPointer(); bool isExtTaggedPointer(); bool isClass(); @@ -154,22 +186,36 @@ private: uintptr_t overrelease_error(); #if SUPPORT_NONPOINTER_ISA + // Controls what parts of root{Retain,Release} to emit/inline + // - Full means the full (slow) implementation + // - Fast means the fastpaths only + // - FastOrMsgSend means the fastpaths but checking whether we should call + // -retain/-release or Swift, for the usage of objc_{retain,release} + enum class RRVariant { + Full, + Fast, + FastOrMsgSend, + }; + // Unified retain count manipulation for nonpointer isa - id rootRetain(bool tryRetain, bool handleOverflow); - bool rootRelease(bool performDealloc, bool handleUnderflow); + inline id rootRetain(bool tryRetain, RRVariant variant); + inline bool rootRelease(bool performDealloc, RRVariant variant); id rootRetain_overflow(bool tryRetain); uintptr_t rootRelease_underflow(bool performDealloc); void clearDeallocating_slow(); // Side table retain count overflow for nonpointer isa + struct SidetableBorrow { size_t borrowed, remaining; }; + void sidetable_lock(); void sidetable_unlock(); void sidetable_moveExtraRC_nolock(size_t extra_rc, bool isDeallocating, bool weaklyReferenced); bool sidetable_addExtraRC_nolock(size_t 
delta_rc); - size_t sidetable_subExtraRC_nolock(size_t delta_rc); + SidetableBorrow sidetable_subExtraRC_nolock(size_t delta_rc); size_t sidetable_getExtraRC_nolock(); + void sidetable_clearExtraRC_nolock(); #endif // Side-table-only retain count @@ -179,10 +225,10 @@ private: bool sidetable_isWeaklyReferenced(); void sidetable_setWeaklyReferenced_nolock(); - id sidetable_retain(); + id sidetable_retain(bool locked = false); id sidetable_retain_slow(SideTable& table); - uintptr_t sidetable_release(bool performDealloc = true); + uintptr_t sidetable_release(bool locked = false, bool performDealloc = true); uintptr_t sidetable_release_slow(SideTable& table, bool performDealloc = true); bool sidetable_tryRetain(); @@ -241,14 +287,6 @@ typedef struct old_property *objc_property_t; #include "objc-loadmethod.h" -#if SUPPORT_PREOPT && __cplusplus -#include -using objc_selopt_t = const objc_opt::objc_selopt_t; -#else -struct objc_selopt_t; -#endif - - #define STRINGIFY(x) #x #define STRINGIFY2(x) STRINGIFY(x) @@ -284,16 +322,24 @@ private: } }; + struct Range shared_cache; struct Range *ranges; uint32_t count; uint32_t size : 31; uint32_t sorted : 1; public: + inline bool inSharedCache(uintptr_t ptr) const { + return shared_cache.contains(ptr); + } inline bool contains(uint16_t witness, uintptr_t ptr) const { return witness < count && ranges[witness].contains(ptr); } + inline void setSharedCacheRange(uintptr_t start, uintptr_t end) { + shared_cache = Range{start, end}; + add(start, end); + } bool find(uintptr_t ptr, uint32_t &pos); void add(uintptr_t start, uintptr_t end); void remove(uintptr_t start, uintptr_t end); @@ -301,6 +347,10 @@ public: extern struct SafeRanges dataSegmentsRanges; +static inline bool inSharedCache(uintptr_t ptr) { + return dataSegmentsRanges.inSharedCache(ptr); +} + } // objc struct header_info; @@ -358,6 +408,22 @@ private: // from this location. 
intptr_t info_offset; + // Offset from this location to the non-lazy class list + intptr_t nlclslist_offset; + uintptr_t nlclslist_count; + + // Offset from this location to the non-lazy category list + intptr_t nlcatlist_offset; + uintptr_t nlcatlist_count; + + // Offset from this location to the category list + intptr_t catlist_offset; + uintptr_t catlist_count; + + // Offset from this location to the category list 2 + intptr_t catlist2_offset; + uintptr_t catlist2_count; + // Do not add fields without editing ObjCModernAbstraction.hpp public: @@ -384,6 +450,30 @@ public: info_offset = (intptr_t)info - (intptr_t)&info_offset; } + const classref_t *nlclslist(size_t *outCount) const; + + void set_nlclslist(const void *list) { + nlclslist_offset = (intptr_t)list - (intptr_t)&nlclslist_offset; + } + + category_t * const *nlcatlist(size_t *outCount) const; + + void set_nlcatlist(const void *list) { + nlcatlist_offset = (intptr_t)list - (intptr_t)&nlcatlist_offset; + } + + category_t * const *catlist(size_t *outCount) const; + + void set_catlist(const void *list) { + catlist_offset = (intptr_t)list - (intptr_t)&catlist_offset; + } + + category_t * const *catlist2(size_t *outCount) const; + + void set_catlist2(const void *list) { + catlist2_offset = (intptr_t)list - (intptr_t)&catlist2_offset; + } + bool isLoaded() { return getHeaderInfoRW()->getLoaded(); } @@ -424,6 +514,8 @@ public: bool hasPreoptimizedProtocols() const; + bool hasPreoptimizedSectionLookups() const; + #if !__OBJC2__ struct old_protocol **proto_refs; struct objc_module *mod_ptr; @@ -497,9 +589,6 @@ extern bool isPreoptimized(void); extern bool noMissingWeakSuperclasses(void); extern header_info *preoptimizedHinfoForHeader(const headerType *mhdr); -extern objc_selopt_t *preoptimizedSelectors(void); - -extern bool sharedCacheSupportsProtocolRoots(void); extern Protocol *getPreoptimizedProtocol(const char *name); extern Protocol *getSharedCachePreoptimizedProtocol(const char *name); @@ -513,18 +602,22 @@ 
extern Class _calloc_class(size_t size); enum { LOOKUP_INITIALIZE = 1, LOOKUP_RESOLVER = 2, - LOOKUP_CACHE = 4, - LOOKUP_NIL = 8, + LOOKUP_NIL = 4, + LOOKUP_NOCACHE = 8, }; extern IMP lookUpImpOrForward(id obj, SEL, Class cls, int behavior); - -static inline IMP -lookUpImpOrNil(id obj, SEL sel, Class cls, int behavior = 0) -{ - return lookUpImpOrForward(obj, sel, cls, behavior | LOOKUP_CACHE | LOOKUP_NIL); -} +extern IMP lookUpImpOrForwardTryCache(id obj, SEL, Class cls, int behavior = 0); +extern IMP lookUpImpOrNilTryCache(id obj, SEL, Class cls, int behavior = 0); extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel); + +struct IMPAndSEL { + IMP imp; + SEL sel; +}; + +extern IMPAndSEL _method_getImplementationAndName(Method m); + extern BOOL class_respondsToSelector_inst(id inst, SEL sel, Class cls); extern Class class_initialize(Class cls, id inst); @@ -775,18 +868,18 @@ __attribute__((aligned(1))) typedef int16_t unaligned_int16_t; // Global operator new and delete. We must not use any app overrides. // This ALSO REQUIRES each of these be in libobjc's unexported symbol list. 
-#if __cplusplus +#if __cplusplus && !defined(TEST_OVERRIDES_NEW) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Winline-new-delete" #include -inline void* operator new(std::size_t size) throw (std::bad_alloc) { return malloc(size); } -inline void* operator new[](std::size_t size) throw (std::bad_alloc) { return malloc(size); } -inline void* operator new(std::size_t size, const std::nothrow_t&) throw() { return malloc(size); } -inline void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return malloc(size); } -inline void operator delete(void* p) throw() { free(p); } -inline void operator delete[](void* p) throw() { free(p); } -inline void operator delete(void* p, const std::nothrow_t&) throw() { free(p); } -inline void operator delete[](void* p, const std::nothrow_t&) throw() { free(p); } +inline void* operator new(std::size_t size) { return malloc(size); } +inline void* operator new[](std::size_t size) { return malloc(size); } +inline void* operator new(std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); } +inline void* operator new[](std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); } +inline void operator delete(void* p) noexcept(true) { free(p); } +inline void operator delete[](void* p) noexcept(true) { free(p); } +inline void operator delete(void* p, const std::nothrow_t&) noexcept(true) { free(p); } +inline void operator delete[](void* p, const std::nothrow_t&) noexcept(true) { free(p); } #pragma clang diagnostic pop #endif @@ -971,7 +1064,7 @@ class ChainedHookFunction { std::atomic hook{nil}; public: - ChainedHookFunction(Fn f) : hook{f} { }; + constexpr ChainedHookFunction(Fn f) : hook{f} { }; Fn get() { return hook.load(std::memory_order_acquire); @@ -990,10 +1083,10 @@ public: // A small vector for use as a global variable. Only supports appending and -// iteration. Stores a single element inline, and multiple elements in a heap +// iteration. 
Stores up to N elements inline, and multiple elements in a heap // allocation. There is no attempt to amortize reallocation cost; this is -// intended to be used in situation where zero or one element is common, two -// might happen, and three or more is very rare. +// intended to be used in situation where a small number of elements is +// common, more might happen, and significantly more is very rare. // // This does not clean up its allocation, and thus cannot be used as a local // variable or member of something with limited lifetime. @@ -1006,7 +1099,7 @@ protected: unsigned count{0}; union { T inlineElements[InlineCount]; - T *elements; + T *elements{nullptr}; }; public: diff --git a/runtime/objc-probes.h b/runtime/objc-probes.h deleted file mode 100644 index 375e6af..0000000 --- a/runtime/objc-probes.h +++ /dev/null @@ -1,3 +0,0 @@ -#define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(obj) -#define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() - diff --git a/runtime/objc-ptrauth.h b/runtime/objc-ptrauth.h index e275dca..8b8ed5b 100644 --- a/runtime/objc-ptrauth.h +++ b/runtime/objc-ptrauth.h @@ -60,6 +60,12 @@ #define __ptrauth_swift_value_witness_function_pointer(__key) #endif +// Workaround Definitions of ptrauth_sign_unauthenticated and friends generate unused variables warnings +#if __has_feature(ptrauth_calls) +#define UNUSED_WITHOUT_PTRAUTH +#else +#define UNUSED_WITHOUT_PTRAUTH __unused +#endif #if __has_feature(ptrauth_calls) @@ -76,5 +82,123 @@ using MethodListIMP = IMP; #endif +// A struct that wraps a pointer using the provided template. +// The provided Auth parameter is used to sign and authenticate +// the pointer as it is read and written. 
+template +struct WrappedPtr { +private: + T *ptr; + +public: + WrappedPtr(T *p) { + *this = p; + } + + WrappedPtr(const WrappedPtr &p) { + *this = p; + } + + WrappedPtr &operator =(T *p) { + ptr = Auth::sign(p, &ptr); + return *this; + } + + WrappedPtr &operator =(const WrappedPtr &p) { + *this = (T *)p; + return *this; + } + + operator T*() const { return get(); } + T *operator->() const { return get(); } + + T *get() const { return Auth::auth(ptr, &ptr); } + + // When asserts are enabled, ensure that we can read a byte from + // the underlying pointer. This can be used to catch ptrauth + // errors early for easier debugging. + void validate() const { +#if !NDEBUG + char *p = (char *)get(); + char dummy; + memset_s(&dummy, 1, *p, 1); + ASSERT(dummy == *p); +#endif + } +}; + +// A "ptrauth" struct that just passes pointers through unchanged. +struct PtrauthRaw { + template + static T *sign(T *ptr, __unused const void *address) { + return ptr; + } + + template + static T *auth(T *ptr, __unused const void *address) { + return ptr; + } +}; + +// A ptrauth struct that stores pointers raw, and strips ptrauth +// when reading. +struct PtrauthStrip { + template + static T *sign(T *ptr, __unused const void *address) { + return ptr; + } + + template + static T *auth(T *ptr, __unused const void *address) { + return ptrauth_strip(ptr, ptrauth_key_process_dependent_data); + } +}; + +// A ptrauth struct that signs and authenticates pointers using the +// DB key with the given discriminator and address diversification. 
+template +struct Ptrauth { + template + static T *sign(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) { + if (!ptr) + return nullptr; + return ptrauth_sign_unauthenticated(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator)); + } + + template + static T *auth(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) { + if (!ptr) + return nullptr; + return ptrauth_auth_data(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator)); + } +}; + +// A template that produces a WrappedPtr to the given type using a +// plain unauthenticated pointer. +template using RawPtr = WrappedPtr; + +#if __has_feature(ptrauth_calls) +// Get a ptrauth type that uses a string discriminator. +#if __BUILDING_OBJCDT__ +#define PTRAUTH_STR(name) PtrauthStrip +#else +#define PTRAUTH_STR(name) Ptrauth +#endif + +// When ptrauth is available, declare a template that wraps a type +// in a WrappedPtr that uses an authenticated pointer using the +// process-dependent data key, address diversification, and a +// discriminator based on the name passed in. +// +// When ptrauth is not available, equivalent to RawPtr. 
+#define DECLARE_AUTHED_PTR_TEMPLATE(name) \ + template using name ## _authed_ptr \ + = WrappedPtr; +#else +#define PTRAUTH_STR(name) PtrauthRaw +#define DECLARE_AUTHED_PTR_TEMPLATE(name) \ + template using name ## _authed_ptr = RawPtr; +#endif + // _OBJC_PTRAUTH_H_ #endif diff --git a/runtime/objc-references.h b/runtime/objc-references.h index 8c79405..71fadae 100644 --- a/runtime/objc-references.h +++ b/runtime/objc-references.h @@ -35,7 +35,7 @@ __BEGIN_DECLS extern void _objc_associations_init(); extern void _object_set_associative_reference(id object, const void *key, id value, uintptr_t policy); extern id _object_get_associative_reference(id object, const void *key); -extern void _object_remove_assocations(id object); +extern void _object_remove_assocations(id object, bool deallocating); __END_DECLS diff --git a/runtime/objc-references.mm b/runtime/objc-references.mm index caa8910..b9ea085 100644 --- a/runtime/objc-references.mm +++ b/runtime/objc-references.mm @@ -38,7 +38,8 @@ enum { OBJC_ASSOCIATION_SETTER_COPY = 3, // NOTE: both bits are set, so we can simply test 1 bit in releaseValue below. OBJC_ASSOCIATION_GETTER_READ = (0 << 8), OBJC_ASSOCIATION_GETTER_RETAIN = (1 << 8), - OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8) + OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8), + OBJC_ASSOCIATION_SYSTEM_OBJECT = _OBJC_ASSOCIATION_SYSTEM_OBJECT, // 1 << 16 }; spinlock_t AssociationsManagerLock; @@ -172,6 +173,7 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_ // retain the new value (if any) outside the lock. 
association.acquireValue(); + bool isFirstAssociation = false; { AssociationsManager manager; AssociationsHashMap &associations(manager.get()); @@ -180,7 +182,7 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_ auto refs_result = associations.try_emplace(disguised, ObjectAssociationMap{}); if (refs_result.second) { /* it's the first association we make */ - object->setHasAssociatedObjects(); + isFirstAssociation = true; } /* establish or replace the association */ @@ -206,6 +208,13 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_ } } + // Call setHasAssociatedObjects outside the lock, since this + // will call the object's _noteAssociatedObjects method if it + // has one, and this may trigger +initialize which might do + // arbitrary stuff, including setting more associated objects. + if (isFirstAssociation) + object->setHasAssociatedObjects(); + // release the old value (outside of the lock). association.releaseHeldValue(); } @@ -215,7 +224,7 @@ _object_set_associative_reference(id object, const void *key, id value, uintptr_ // raw isa objects (such as OS Objects) that can't track // whether they have associated objects. void -_object_remove_assocations(id object) +_object_remove_assocations(id object, bool deallocating) { ObjectAssociationMap refs{}; @@ -225,12 +234,36 @@ _object_remove_assocations(id object) AssociationsHashMap::iterator i = associations.find((objc_object *)object); if (i != associations.end()) { refs.swap(i->second); - associations.erase(i); + + // If we are not deallocating, then SYSTEM_OBJECT associations are preserved. + bool didReInsert = false; + if (!deallocating) { + for (auto &ref: refs) { + if (ref.second.policy() & OBJC_ASSOCIATION_SYSTEM_OBJECT) { + i->second.insert(ref); + didReInsert = true; + } + } + } + if (!didReInsert) + associations.erase(i); } } + // Associations to be released after the normal ones. 
+ SmallVector laterRefs; + // release everything (outside of the lock). for (auto &i: refs) { - i.second.releaseHeldValue(); + if (i.second.policy() & OBJC_ASSOCIATION_SYSTEM_OBJECT) { + // If we are not deallocating, then RELEASE_LATER associations don't get released. + if (deallocating) + laterRefs.append(&i.second); + } else { + i.second.releaseHeldValue(); + } + } + for (auto *later: laterRefs) { + later->releaseHeldValue(); } } diff --git a/runtime/objc-runtime-new.h b/runtime/objc-runtime-new.h index d3541cf..f44a0d0 100644 --- a/runtime/objc-runtime-new.h +++ b/runtime/objc-runtime-new.h @@ -25,6 +25,7 @@ #define _OBJC_RUNTIME_NEW_H #include "PointerUnion.h" +#include // class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags) // The extra bits are optimized for the retain/release and alloc/dealloc paths. @@ -94,13 +95,19 @@ // class has started realizing but not yet completed it #define RW_REALIZING (1<<19) +#if CONFIG_USE_PREOPT_CACHES +// this class and its descendants can't have preopt caches with inlined sels +#define RW_NOPREOPT_SELS (1<<2) +// this class and its descendants can't have preopt caches +#define RW_NOPREOPT_CACHE (1<<1) +#endif + // class is a metaclass (copied from ro) #define RW_META RO_META // (1<<0) // NOTE: MORE RW_ FLAGS DEFINED BELOW - // Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*), // or class_t->bits (FAST_*). // @@ -215,19 +222,19 @@ private: #endif // Compute the ptrauth signing modifier from &_imp, newSel, and cls. - uintptr_t modifierForSEL(SEL newSel, Class cls) const { - return (uintptr_t)&_imp ^ (uintptr_t)newSel ^ (uintptr_t)cls; + uintptr_t modifierForSEL(bucket_t *base, SEL newSel, Class cls) const { + return (uintptr_t)base ^ (uintptr_t)newSel ^ (uintptr_t)cls; } // Sign newImp, with &_imp, newSel, and cls as modifiers. 
- uintptr_t encodeImp(IMP newImp, SEL newSel, Class cls) const { + uintptr_t encodeImp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, IMP newImp, UNUSED_WITHOUT_PTRAUTH SEL newSel, Class cls) const { if (!newImp) return 0; #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH return (uintptr_t) ptrauth_auth_and_resign(newImp, ptrauth_key_function_pointer, 0, ptrauth_key_process_dependent_code, - modifierForSEL(newSel, cls)); + modifierForSEL(base, newSel, cls)); #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR return (uintptr_t)newImp ^ (uintptr_t)cls; #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE @@ -238,17 +245,36 @@ private: } public: - inline SEL sel() const { return _sel.load(memory_order::memory_order_relaxed); } + static inline size_t offsetOfSel() { return offsetof(bucket_t, _sel); } + inline SEL sel() const { return _sel.load(memory_order_relaxed); } - inline IMP imp(Class cls) const { - uintptr_t imp = _imp.load(memory_order::memory_order_relaxed); +#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR +#define MAYBE_UNUSED_ISA +#else +#define MAYBE_UNUSED_ISA __attribute__((unused)) +#endif + inline IMP rawImp(MAYBE_UNUSED_ISA objc_class *cls) const { + uintptr_t imp = _imp.load(memory_order_relaxed); if (!imp) return nil; #if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH - SEL sel = _sel.load(memory_order::memory_order_relaxed); +#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR + imp ^= (uintptr_t)cls; +#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE +#else +#error Unknown method cache IMP encoding. 
+#endif + return (IMP)imp; + } + + inline IMP imp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, Class cls) const { + uintptr_t imp = _imp.load(memory_order_relaxed); + if (!imp) return nil; +#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH + SEL sel = _sel.load(memory_order_relaxed); return (IMP) ptrauth_auth_and_resign((const void *)imp, ptrauth_key_process_dependent_code, - modifierForSEL(sel, cls), + modifierForSEL(base, sel, cls), ptrauth_key_function_pointer, 0); #elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR return (IMP)(imp ^ (uintptr_t)cls); @@ -260,26 +286,97 @@ public: } template - void set(SEL newSel, IMP newImp, Class cls); + void set(bucket_t *base, SEL newSel, IMP newImp, Class cls); }; +/* dyld_shared_cache_builder and obj-C agree on these definitions */ +enum { + OBJC_OPT_METHODNAME_START = 0, + OBJC_OPT_METHODNAME_END = 1, + OBJC_OPT_INLINED_METHODS_START = 2, + OBJC_OPT_INLINED_METHODS_END = 3, + + __OBJC_OPT_OFFSETS_COUNT, +}; + +#if CONFIG_USE_PREOPT_CACHES +extern uintptr_t objc_opt_offsets[__OBJC_OPT_OFFSETS_COUNT]; +#endif + +/* dyld_shared_cache_builder and obj-C agree on these definitions */ +struct preopt_cache_entry_t { + uint32_t sel_offs; + uint32_t imp_offs; +}; + +/* dyld_shared_cache_builder and obj-C agree on these definitions */ +struct preopt_cache_t { + int32_t fallback_class_offset; + union { + struct { + uint16_t shift : 5; + uint16_t mask : 11; + }; + uint16_t hash_params; + }; + uint16_t occupied : 14; + uint16_t has_inlines : 1; + uint16_t bit_one : 1; + preopt_cache_entry_t entries[]; + + inline int capacity() const { + return mask + 1; + } +}; + +// returns: +// - the cached IMP when one is found +// - nil if there's no cached value and the cache is dynamic +// - `value_on_constant_cache_miss` if there's no cached value and the cache is preoptimized +extern "C" IMP cache_getImp(Class cls, SEL sel, IMP value_on_constant_cache_miss = nil); struct cache_t { +private: + explicit_atomic _bucketsAndMaybeMask; + union { + 
struct { + explicit_atomic _maybeMask; +#if __LP64__ + uint16_t _flags; +#endif + uint16_t _occupied; + }; + explicit_atomic _originalPreoptCache; + }; + #if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED - explicit_atomic _buckets; - explicit_atomic _mask; -#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 - explicit_atomic _maskAndBuckets; - mask_t _mask_unused; + // _bucketsAndMaybeMask is a buckets_t pointer + // _maybeMask is the buckets mask + + static constexpr uintptr_t bucketsMask = ~0ul; + static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported"); +#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS + static constexpr uintptr_t maskShift = 48; + static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1; + static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << maskShift) - 1; + static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers."); +#if CONFIG_USE_PREOPT_CACHES + static constexpr uintptr_t preoptBucketsMarker = 1ul; + static constexpr uintptr_t preoptBucketsMask = bucketsMask & ~preoptBucketsMarker; +#endif +#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16 + // _bucketsAndMaybeMask is a buckets_t pointer in the low 48 bits + // _maybeMask is unused, the mask is stored in the top 16 bits. + // How much the mask is shifted by. static constexpr uintptr_t maskShift = 48; - + // Additional bits after the mask which must be zero. msgSend // takes advantage of these additional bits to construct the value // `mask << 4` from `_maskAndBuckets` in a single instruction. static constexpr uintptr_t maskZeroBits = 4; - + // The largest mask value we can store. static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1; @@ -287,40 +384,107 @@ struct cache_t { static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1; // Ensure we have enough bits for the buckets pointer. 
- static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers."); + static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, + "Bucket field doesn't have enough bits for arbitrary pointers."); + +#if CONFIG_USE_PREOPT_CACHES + static constexpr uintptr_t preoptBucketsMarker = 1ul; +#if __has_feature(ptrauth_calls) + // 63..60: hash_mask_shift + // 59..55: hash_shift + // 54.. 1: buckets ptr + auth + // 0: always 1 + static constexpr uintptr_t preoptBucketsMask = 0x007ffffffffffffe; + static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) { + uintptr_t value = (uintptr_t)cache->shift << 55; + // masks have 11 bits but can be 0, so we compute + // the right shift for 0x7fff rather than 0xffff + return value | ((objc::mask16ShiftBits(cache->mask) - 1) << 60); + } +#else + // 63..53: hash_mask + // 52..48: hash_shift + // 47.. 1: buckets ptr + // 0: always 1 + static constexpr uintptr_t preoptBucketsMask = 0x0000fffffffffffe; + static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) { + return (uintptr_t)cache->hash_params << 48; + } +#endif +#endif // CONFIG_USE_PREOPT_CACHES #elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4 - // _maskAndBuckets stores the mask shift in the low 4 bits, and - // the buckets pointer in the remainder of the value. The mask - // shift is the value where (0xffff >> shift) produces the correct - // mask. This is equal to 16 - log2(cache_size). - explicit_atomic _maskAndBuckets; - mask_t _mask_unused; + // _bucketsAndMaybeMask is a buckets_t pointer in the top 28 bits + // _maybeMask is unused, the mask length is stored in the low 4 bits static constexpr uintptr_t maskBits = 4; static constexpr uintptr_t maskMask = (1 << maskBits) - 1; static constexpr uintptr_t bucketsMask = ~maskMask; + static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported"); #else #error Unknown cache mask storage type. 
#endif - -#if __LP64__ - uint16_t _flags; -#endif - uint16_t _occupied; -public: - static bucket_t *emptyBuckets(); - - struct bucket_t *buckets(); - mask_t mask(); - mask_t occupied(); + bool isConstantEmptyCache() const; + bool canBeFreed() const; + mask_t mask() const; + +#if CONFIG_USE_PREOPT_CACHES + void initializeToPreoptCacheInDisguise(const preopt_cache_t *cache); + const preopt_cache_t *disguised_preopt_cache() const; +#endif + void incrementOccupied(); void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask); + + void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld); + void collect_free(bucket_t *oldBuckets, mask_t oldCapacity); + + static bucket_t *emptyBuckets(); + static bucket_t *allocateBuckets(mask_t newCapacity); + static bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true); + static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap); + void bad_cache(id receiver, SEL sel) __attribute__((noreturn, cold)); + +public: + // The following four fields are public for objcdt's use only. + // objcdt reaches into fields while the process is suspended + // hence doesn't care for locks and pesky little details like this + // and can safely use these. 
+ unsigned capacity() const; + struct bucket_t *buckets() const; + Class cls() const; + +#if CONFIG_USE_PREOPT_CACHES + const preopt_cache_t *preopt_cache() const; +#endif + + mask_t occupied() const; void initializeToEmpty(); - unsigned capacity(); - bool isConstantEmptyCache(); - bool canBeFreed(); +#if CONFIG_USE_PREOPT_CACHES + bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = (uintptr_t)&_objc_empty_cache) const; + bool shouldFlush(SEL sel, IMP imp) const; + bool isConstantOptimizedCacheWithInlinedSels() const; + Class preoptFallbackClass() const; + void maybeConvertToPreoptimized(); + void initializeToEmptyOrPreoptimizedInDisguise(); +#else + inline bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = 0) const { return false; } + inline bool shouldFlush(SEL sel, IMP imp) const { + return cache_getImp(cls(), sel) == imp; + } + inline bool isConstantOptimizedCacheWithInlinedSels() const { return false; } + inline void initializeToEmptyOrPreoptimizedInDisguise() { initializeToEmpty(); } +#endif + + void insert(SEL sel, IMP imp, id receiver); + void copyCacheNolock(objc_imp_cache_entry *buffer, int len); + void destroy(); + void eraseNolock(const char *func); + + static void init(); + static void collectNolock(bool collectALot); + static size_t bytesForCapacity(uint32_t cap); #if __LP64__ bool getBit(uint16_t flags) const { @@ -383,14 +547,6 @@ public: // nothing } #endif - - static size_t bytesForCapacity(uint32_t cap); - static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap); - - void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld); - void insert(Class cls, SEL sel, IMP imp, id receiver); - - static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold)); }; @@ -398,6 +554,29 @@ public: typedef struct classref * classref_t; +/*********************************************************************** +* RelativePointer +* A pointer stored as an offset from the 
address of that offset. +* +* The target address is computed by taking the address of this struct +* and adding the offset stored within it. This is a 32-bit signed +* offset giving ±2GB of range. +**********************************************************************/ +template +struct RelativePointer: nocopy_t { + int32_t offset; + + T get() const { + if (offset == 0) + return nullptr; + uintptr_t base = (uintptr_t)&offset; + uintptr_t signExtendedOffset = (uintptr_t)(intptr_t)offset; + uintptr_t pointer = base + signExtendedOffset; + return (T)pointer; + } +}; + + #ifdef __PTRAUTH_INTRINSICS__ # define StubClassInitializerPtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0xc671) #else @@ -408,20 +587,27 @@ struct stub_class_t { _objc_swiftMetadataInitializer StubClassInitializerPtrauth initializer; }; +// A pointer modifier that does nothing to the pointer. +struct PointerModifierNop { + template + static T *modify(__unused const ListType &list, T *ptr) { return ptr; } +}; + /*********************************************************************** -* entsize_list_tt +* entsize_list_tt * Generic implementation of an array of non-fragile structs. * * Element is the struct type (e.g. method_t) * List is the specialization of entsize_list_tt (e.g. method_list_t) * FlagMask is used to stash extra bits in the entsize field * (e.g. method list fixup markers) +* PointerModifier is applied to the element pointers retrieved from +* the array. 
**********************************************************************/ -template +template struct entsize_list_tt { uint32_t entsizeAndFlags; uint32_t count; - Element first; uint32_t entsize() const { return entsizeAndFlags & ~FlagMask; @@ -432,7 +618,7 @@ struct entsize_list_tt { Element& getOrEnd(uint32_t i) const { ASSERT(i <= count); - return *(Element *)((uint8_t *)&first + i*entsize()); + return *PointerModifier::modify(*this, (Element *)((uint8_t *)this + sizeof(*this) + i*entsize())); } Element& get(uint32_t i) const { ASSERT(i < count); @@ -444,15 +630,7 @@ struct entsize_list_tt { } static size_t byteSize(uint32_t entsize, uint32_t count) { - return sizeof(entsize_list_tt) + (count-1)*entsize; - } - - List *duplicate() const { - auto *dup = (List *)calloc(this->byteSize(), 1); - dup->entsizeAndFlags = this->entsizeAndFlags; - dup->count = this->count; - std::copy(begin(), end(), dup->begin()); - return dup; + return sizeof(entsize_list_tt) + count*entsize; } struct iterator; @@ -540,19 +718,146 @@ struct entsize_list_tt { }; +namespace objc { +// Let method_t::small use this from objc-private.h. +static inline bool inSharedCache(uintptr_t ptr); +} + struct method_t { - SEL name; - const char *types; - MethodListIMP imp; + static const uint32_t smallMethodListFlag = 0x80000000; + + method_t(const method_t &other) = delete; + + // The representation of a "big" method. This is the traditional + // representation of three pointers storing the selector, types + // and implementation. + struct big { + SEL name; + const char *types; + MethodListIMP imp; + }; + +private: + bool isSmall() const { + return ((uintptr_t)this & 1) == 1; + } + + // The representation of a "small" method. This stores three + // relative offsets to the name, types, and implementation. + struct small { + // The name field either refers to a selector (in the shared + // cache) or a selref (everywhere else). 
+ RelativePointer name; + RelativePointer types; + RelativePointer imp; + + bool inSharedCache() const { + return (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS && + objc::inSharedCache((uintptr_t)this)); + } + }; + + small &small() const { + ASSERT(isSmall()); + return *(struct small *)((uintptr_t)this & ~(uintptr_t)1); + } + + IMP remappedImp(bool needsLock) const; + void remapImp(IMP imp); + objc_method_description *getSmallDescription() const; + +public: + static const auto bigSize = sizeof(struct big); + static const auto smallSize = sizeof(struct small); + + // The pointer modifier used with method lists. When the method + // list contains small methods, set the bottom bit of the pointer. + // We use that bottom bit elsewhere to distinguish between big + // and small methods. + struct pointer_modifier { + template + static method_t *modify(const ListType &list, method_t *ptr) { + if (list.flags() & smallMethodListFlag) + return (method_t *)((uintptr_t)ptr | 1); + return ptr; + } + }; + + big &big() const { + ASSERT(!isSmall()); + return *(struct big *)this; + } + + SEL name() const { + if (isSmall()) { + return (small().inSharedCache() + ? (SEL)small().name.get() + : *(SEL *)small().name.get()); + } else { + return big().name; + } + } + const char *types() const { + return isSmall() ? 
small().types.get() : big().types; + } + IMP imp(bool needsLock) const { + if (isSmall()) { + IMP imp = remappedImp(needsLock); + if (!imp) + imp = ptrauth_sign_unauthenticated(small().imp.get(), + ptrauth_key_function_pointer, 0); + return imp; + } + return big().imp; + } + + SEL getSmallNameAsSEL() const { + ASSERT(small().inSharedCache()); + return (SEL)small().name.get(); + } + + SEL getSmallNameAsSELRef() const { + ASSERT(!small().inSharedCache()); + return *(SEL *)small().name.get(); + } + + void setName(SEL name) { + if (isSmall()) { + ASSERT(!small().inSharedCache()); + *(SEL *)small().name.get() = name; + } else { + big().name = name; + } + } + + void setImp(IMP imp) { + if (isSmall()) { + remapImp(imp); + } else { + big().imp = imp; + } + } + + objc_method_description *getDescription() const { + return isSmall() ? getSmallDescription() : (struct objc_method_description *)this; + } struct SortBySELAddress : - public std::binary_function + public std::binary_function { - bool operator() (const method_t& lhs, - const method_t& rhs) + bool operator() (const struct method_t::big& lhs, + const struct method_t::big& rhs) { return lhs.name < rhs.name; } }; + + method_t &operator=(const method_t &other) { + ASSERT(!isSmall()); + big().name = other.name(); + big().types = other.types(); + big().imp = other.imp(false); + return *this; + } }; struct ivar_t { @@ -583,7 +888,15 @@ struct property_t { }; // Two bits of entsize are used for fixup markers. -struct method_list_t : entsize_list_tt { +// Reserve the top half of entsize for more flags. We never +// need entry sizes anywhere close to 64kB. +// +// Currently there is one flag defined: the small method list flag, +// method_t::smallMethodListFlag. Other flags are currently ignored. +// (NOTE: these bits are only ignored on runtimes that support small +// method lists. Older runtimes will treat them as part of the entry +// size!) 
+struct method_list_t : entsize_list_tt { bool isUniqued() const; bool isFixedUp() const; void setFixedUp(); @@ -594,6 +907,31 @@ struct method_list_t : entsize_list_tt { ASSERT(i < count); return i; } + + bool isSmallList() const { + return flags() & method_t::smallMethodListFlag; + } + + bool isExpectedSize() const { + if (isSmallList()) + return entsize() == method_t::smallSize; + else + return entsize() == method_t::bigSize; + } + + method_list_t *duplicate() const { + method_list_t *dup; + if (isSmallList()) { + dup = (method_list_t *)calloc(byteSize(method_t::bigSize, count), 1); + dup->entsizeAndFlags = method_t::bigSize; + } else { + dup = (method_list_t *)calloc(this->byteSize(), 1); + dup->entsizeAndFlags = this->entsizeAndFlags; + } + dup->count = this->count; + std::copy(begin(), end(), dup->begin()); + return dup; + } }; struct ivar_list_t : entsize_list_tt { @@ -643,7 +981,7 @@ struct protocol_t : objc_object { bool isCanonical() const; void clearIsCanonical(); -# define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f)) +# define HAS_FIELD(f) ((uintptr_t)(&f) < ((uintptr_t)this + size)) bool hasExtendedMethodTypesField() const { return HAS_FIELD(_extendedMethodTypes); @@ -704,10 +1042,15 @@ struct class_ro_t { uint32_t reserved; #endif - const uint8_t * ivarLayout; - - const char * name; - method_list_t * baseMethodList; + union { + const uint8_t * ivarLayout; + Class nonMetaclass; + }; + + explicit_atomic name; + // With ptrauth, this is signed if it points to a small list, but + // may be unsigned if it points to a big list. + void *baseMethodList; protocol_list_t * baseProtocols; const ivar_list_t * ivars; @@ -725,31 +1068,117 @@ struct class_ro_t { } } + const char *getName() const { + return name.load(std::memory_order_acquire); + } + + static const uint16_t methodListPointerDiscriminator = 0xC310; +#if 0 // FIXME: enable this when we get a non-empty definition of __ptrauth_objc_method_list_pointer from ptrauth.h. 
+ static_assert(std::is_same< + void * __ptrauth_objc_method_list_pointer *, + void * __ptrauth(ptrauth_key_method_list_pointer, 1, methodListPointerDiscriminator) *>::value, + "Method list pointer signing discriminator must match ptrauth.h"); +#endif + method_list_t *baseMethods() const { - return baseMethodList; +#if __has_feature(ptrauth_calls) + method_list_t *ptr = ptrauth_strip((method_list_t *)baseMethodList, ptrauth_key_method_list_pointer); + if (ptr == nullptr) + return nullptr; + + // Don't auth if the class_ro and the method list are both in the shared cache. + // This is secure since they'll be read-only, and this allows the shared cache + // to cut down on the number of signed pointers it has. + bool roInSharedCache = objc::inSharedCache((uintptr_t)this); + bool listInSharedCache = objc::inSharedCache((uintptr_t)ptr); + if (roInSharedCache && listInSharedCache) + return ptr; + + // Auth all other small lists. + if (ptr->isSmallList()) + ptr = ptrauth_auth_data((method_list_t *)baseMethodList, + ptrauth_key_method_list_pointer, + ptrauth_blend_discriminator(&baseMethodList, + methodListPointerDiscriminator)); + return ptr; +#else + return (method_list_t *)baseMethodList; +#endif + } + + uintptr_t baseMethodListPtrauthData() const { + return ptrauth_blend_discriminator(&baseMethodList, + methodListPointerDiscriminator); } class_ro_t *duplicate() const { - if (flags & RO_HAS_SWIFT_INITIALIZER) { - size_t size = sizeof(*this) + sizeof(_swiftMetadataInitializer_NEVER_USE[0]); - class_ro_t *ro = (class_ro_t *)memdup(this, size); + bool hasSwiftInitializer = flags & RO_HAS_SWIFT_INITIALIZER; + + size_t size = sizeof(*this); + if (hasSwiftInitializer) + size += sizeof(_swiftMetadataInitializer_NEVER_USE[0]); + + class_ro_t *ro = (class_ro_t *)memdup(this, size); + + if (hasSwiftInitializer) ro->_swiftMetadataInitializer_NEVER_USE[0] = this->_swiftMetadataInitializer_NEVER_USE[0]; - return ro; + +#if __has_feature(ptrauth_calls) + // Re-sign the method list 
pointer if it was signed. + // NOTE: It is possible for a signed pointer to have a signature + // that is all zeroes. This is indistinguishable from a raw pointer. + // This code will treat such a pointer as signed and re-sign it. A + // false positive is safe: method list pointers are either authed or + // stripped, so if baseMethods() doesn't expect it to be signed, it + // will ignore the signature. + void *strippedBaseMethodList = ptrauth_strip(baseMethodList, ptrauth_key_method_list_pointer); + void *signedBaseMethodList = ptrauth_sign_unauthenticated(strippedBaseMethodList, + ptrauth_key_method_list_pointer, + baseMethodListPtrauthData()); + if (baseMethodList == signedBaseMethodList) { + ro->baseMethodList = ptrauth_auth_and_resign(baseMethodList, + ptrauth_key_method_list_pointer, + baseMethodListPtrauthData(), + ptrauth_key_method_list_pointer, + ro->baseMethodListPtrauthData()); } else { - size_t size = sizeof(*this); - class_ro_t *ro = (class_ro_t *)memdup(this, size); - return ro; + // Special case: a class_ro_t in the shared cache pointing to a + // method list in the shared cache will not have a signed pointer, + // but the duplicate will be expected to have a signed pointer since + // it's not in the shared cache. Detect that and sign it. 
+ bool roInSharedCache = objc::inSharedCache((uintptr_t)this); + bool listInSharedCache = objc::inSharedCache((uintptr_t)strippedBaseMethodList); + if (roInSharedCache && listInSharedCache) + ro->baseMethodList = ptrauth_sign_unauthenticated(strippedBaseMethodList, + ptrauth_key_method_list_pointer, + ro->baseMethodListPtrauthData()); } +#endif + + return ro; + } + + Class getNonMetaclass() const { + ASSERT(flags & RO_META); + return nonMetaclass; + } + + const uint8_t *getIvarLayout() const { + if (flags & RO_META) + return nullptr; + return ivarLayout; } }; /*********************************************************************** -* list_array_tt +* list_array_tt * Generic implementation for metadata that can be augmented by categories. * * Element is the underlying metadata type (e.g. method_t) * List is the metadata's list type (e.g. method_list_t) +* List is a template applied to Element to make Element*. Useful for +* applying qualifiers to the pointer type. * * A list_array_tt has one of three values: * - empty @@ -759,11 +1188,11 @@ struct class_ro_t { * countLists/beginLists/endLists iterate the metadata lists * count/begin/end iterate the underlying metadata elements **********************************************************************/ -template +template class Ptr> class list_array_tt { struct array_t { uint32_t count; - List* lists[0]; + Ptr lists[0]; static size_t byteSize(uint32_t count) { return sizeof(array_t) + count*sizeof(lists[0]); @@ -775,12 +1204,12 @@ class list_array_tt { protected: class iterator { - List * const *lists; - List * const *listsEnd; + const Ptr *lists; + const Ptr *listsEnd; typename List::iterator m, mEnd; public: - iterator(List *const *begin, List *const *end) + iterator(const Ptr *begin, const Ptr *end) : lists(begin), listsEnd(end) { if (begin != end) { @@ -820,7 +1249,7 @@ class list_array_tt { private: union { - List* list; + Ptr list; uintptr_t arrayAndFlag; }; @@ -836,9 +1265,26 @@ class list_array_tt { arrayAndFlag 
= (uintptr_t)array | 1; } + void validate() { + for (auto cursor = beginLists(), end = endLists(); cursor != end; cursor++) + cursor->validate(); + } + public: list_array_tt() : list(nullptr) { } list_array_tt(List *l) : list(l) { } + list_array_tt(const list_array_tt &other) { + *this = other; + } + + list_array_tt &operator =(const list_array_tt &other) { + if (other.hasArray()) { + arrayAndFlag = other.arrayAndFlag; + } else { + list = other.list; + } + return *this; + } uint32_t count() const { uint32_t result = 0; @@ -856,14 +1302,13 @@ class list_array_tt { } iterator end() const { - List * const *e = endLists(); + auto e = endLists(); return iterator(e, e); } - - uint32_t countLists() { + inline uint32_t countLists(const std::function & peek) const { if (hasArray()) { - return array()->count; + return peek(array())->count; } else if (list) { return 1; } else { @@ -871,7 +1316,11 @@ class list_array_tt { } } - List* const * beginLists() const { + uint32_t countLists() { + return countLists([](array_t *x) { return x; }); + } + + const Ptr* beginLists() const { if (hasArray()) { return array()->lists; } else { @@ -879,7 +1328,7 @@ class list_array_tt { } } - List* const * endLists() const { + const Ptr* endLists() const { if (hasArray()) { return array()->lists + array()->count; } else if (list) { @@ -896,27 +1345,34 @@ class list_array_tt { // many lists -> many lists uint32_t oldCount = array()->count; uint32_t newCount = oldCount + addedCount; - setArray((array_t *)realloc(array(), array_t::byteSize(newCount))); + array_t *newArray = (array_t *)malloc(array_t::byteSize(newCount)); + newArray->count = newCount; array()->count = newCount; - memmove(array()->lists + addedCount, array()->lists, - oldCount * sizeof(array()->lists[0])); - memcpy(array()->lists, addedLists, - addedCount * sizeof(array()->lists[0])); + + for (int i = oldCount - 1; i >= 0; i--) + newArray->lists[i + addedCount] = array()->lists[i]; + for (unsigned i = 0; i < addedCount; i++) + 
newArray->lists[i] = addedLists[i]; + free(array()); + setArray(newArray); + validate(); } else if (!list && addedCount == 1) { // 0 lists -> 1 list list = addedLists[0]; + validate(); } else { // 1 list -> many lists - List* oldList = list; + Ptr oldList = list; uint32_t oldCount = oldList ? 1 : 0; uint32_t newCount = oldCount + addedCount; setArray((array_t *)malloc(array_t::byteSize(newCount))); array()->count = newCount; if (oldList) array()->lists[addedCount] = oldList; - memcpy(array()->lists, addedLists, - addedCount * sizeof(array()->lists[0])); + for (unsigned i = 0; i < addedCount; i++) + array()->lists[i] = addedLists[i]; + validate(); } } @@ -932,79 +1388,66 @@ class list_array_tt { } } - template - Result duplicate() { - Result result; - + template + void duplicateInto(Other &other) { if (hasArray()) { array_t *a = array(); - result.setArray((array_t *)memdup(a, a->byteSize())); + other.setArray((array_t *)memdup(a, a->byteSize())); for (uint32_t i = 0; i < a->count; i++) { - result.array()->lists[i] = a->lists[i]->duplicate(); + other.array()->lists[i] = a->lists[i]->duplicate(); } } else if (list) { - result.list = list->duplicate(); + other.list = list->duplicate(); } else { - result.list = nil; + other.list = nil; } - - return result; } }; +DECLARE_AUTHED_PTR_TEMPLATE(method_list_t) + class method_array_t : - public list_array_tt + public list_array_tt { - typedef list_array_tt Super; + typedef list_array_tt Super; public: method_array_t() : Super() { } method_array_t(method_list_t *l) : Super(l) { } - method_list_t * const *beginCategoryMethodLists() const { + const method_list_t_authed_ptr *beginCategoryMethodLists() const { return beginLists(); } - method_list_t * const *endCategoryMethodLists(Class cls) const; - - method_array_t duplicate() { - return Super::duplicate(); - } + const method_list_t_authed_ptr *endCategoryMethodLists(Class cls) const; }; class property_array_t : - public list_array_tt + public list_array_tt { - typedef 
list_array_tt Super; + typedef list_array_tt Super; public: property_array_t() : Super() { } property_array_t(property_list_t *l) : Super(l) { } - - property_array_t duplicate() { - return Super::duplicate(); - } }; class protocol_array_t : - public list_array_tt + public list_array_tt { - typedef list_array_tt Super; + typedef list_array_tt Super; public: protocol_array_t() : Super() { } protocol_array_t(protocol_list_t *l) : Super(l) { } - - protocol_array_t duplicate() { - return Super::duplicate(); - } }; struct class_rw_ext_t { - const class_ro_t *ro; + DECLARE_AUTHED_PTR_TEMPLATE(class_ro_t) + class_ro_t_authed_ptr ro; method_array_t methods; property_array_t properties; protocol_array_t protocols; @@ -1026,21 +1469,21 @@ struct class_rw_t { Class nextSiblingClass; private: - using ro_or_rw_ext_t = objc::PointerUnion; + using ro_or_rw_ext_t = objc::PointerUnion; const ro_or_rw_ext_t get_ro_or_rwe() const { return ro_or_rw_ext_t{ro_or_rw_ext}; } void set_ro_or_rwe(const class_ro_t *ro) { - ro_or_rw_ext_t{ro}.storeAt(ro_or_rw_ext, memory_order_relaxed); + ro_or_rw_ext_t{ro, &ro_or_rw_ext}.storeAt(ro_or_rw_ext, memory_order_relaxed); } void set_ro_or_rwe(class_rw_ext_t *rwe, const class_ro_t *ro) { // the release barrier is so that the class_rw_ext_t::ro initialization // is visible to lockless readers rwe->ro = ro; - ro_or_rw_ext_t{rwe}.storeAt(ro_or_rw_ext, memory_order_release); + ro_or_rw_ext_t{rwe, &ro_or_rw_ext}.storeAt(ro_or_rw_ext, memory_order_release); } class_rw_ext_t *extAlloc(const class_ro_t *ro, bool deep = false); @@ -1069,15 +1512,15 @@ public: } class_rw_ext_t *ext() const { - return get_ro_or_rwe().dyn_cast(); + return get_ro_or_rwe().dyn_cast(&ro_or_rw_ext); } class_rw_ext_t *extAllocIfNeeded() { auto v = get_ro_or_rwe(); if (fastpath(v.is())) { - return v.get(); + return v.get(&ro_or_rw_ext); } else { - return extAlloc(v.get()); + return extAlloc(v.get(&ro_or_rw_ext)); } } @@ -1088,15 +1531,15 @@ public: const class_ro_t *ro() const { auto v 
= get_ro_or_rwe(); if (slowpath(v.is())) { - return v.get()->ro; + return v.get(&ro_or_rw_ext)->ro; } - return v.get(); + return v.get(&ro_or_rw_ext); } void set_ro(const class_ro_t *ro) { auto v = get_ro_or_rwe(); if (v.is()) { - v.get()->ro = ro; + v.get(&ro_or_rw_ext)->ro = ro; } else { set_ro_or_rwe(ro); } @@ -1105,27 +1548,27 @@ public: const method_array_t methods() const { auto v = get_ro_or_rwe(); if (v.is()) { - return v.get()->methods; + return v.get(&ro_or_rw_ext)->methods; } else { - return method_array_t{v.get()->baseMethods()}; + return method_array_t{v.get(&ro_or_rw_ext)->baseMethods()}; } } const property_array_t properties() const { auto v = get_ro_or_rwe(); if (v.is()) { - return v.get()->properties; + return v.get(&ro_or_rw_ext)->properties; } else { - return property_array_t{v.get()->baseProperties}; + return property_array_t{v.get(&ro_or_rw_ext)->baseProperties}; } } const protocol_array_t protocols() const { auto v = get_ro_or_rwe(); if (v.is()) { - return v.get()->protocols; + return v.get(&ro_or_rw_ext)->protocols; } else { - return protocol_array_t{v.get()->baseProtocols}; + return protocol_array_t{v.get(&ro_or_rw_ext)->baseProtocols}; } } }; @@ -1147,12 +1590,10 @@ private: void setAndClearBits(uintptr_t set, uintptr_t clear) { ASSERT((set & clear) == 0); - uintptr_t oldBits; - uintptr_t newBits; + uintptr_t newBits, oldBits = LoadExclusive(&bits); do { - oldBits = LoadExclusive(&bits); newBits = (oldBits | set) & ~clear; - } while (!StoreReleaseExclusive(&bits, oldBits, newBits)); + } while (slowpath(!StoreReleaseExclusive(&bits, &oldBits, newBits))); } void setBits(uintptr_t set) { @@ -1182,7 +1623,7 @@ public: // Get the class's ro data, even in the presence of concurrent realization. 
// fixme this isn't really safe without a compiler barrier at least // and probably a memory barrier when realizeClass changes the data field - const class_ro_t *safe_ro() { + const class_ro_t *safe_ro() const { class_rw_t *maybe_rw = data(); if (maybe_rw->flags & RW_REALIZED) { // maybe_rw is rw @@ -1193,13 +1634,16 @@ public: } } - void setClassArrayIndex(unsigned Idx) { #if SUPPORT_INDEXED_ISA + void setClassArrayIndex(unsigned Idx) { // 0 is unused as then we can rely on zero-initialisation from calloc. ASSERT(Idx > 0); data()->index = Idx; -#endif } +#else + void setClassArrayIndex(__unused unsigned Idx) { + } +#endif unsigned classArrayIndex() { #if SUPPORT_INDEXED_ISA @@ -1242,11 +1686,49 @@ public: struct objc_class : objc_object { + objc_class(const objc_class&) = delete; + objc_class(objc_class&&) = delete; + void operator=(const objc_class&) = delete; + void operator=(objc_class&&) = delete; // Class ISA; Class superclass; cache_t cache; // formerly cache pointer and vtable class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags + Class getSuperclass() const { +#if __has_feature(ptrauth_calls) +# if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH + if (superclass == Nil) + return Nil; + +#if SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL + void *stripped = ptrauth_strip((void *)superclass, ISA_SIGNING_KEY); + if ((void *)superclass == stripped) { + void *resigned = ptrauth_sign_unauthenticated(stripped, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS)); + if ((void *)superclass != resigned) + return Nil; + } +#endif + + void *result = ptrauth_auth_data((void *)superclass, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS)); + return (Class)result; + +# else + return (Class)ptrauth_strip((void *)superclass, ISA_SIGNING_KEY); +# endif +#else + return superclass; +#endif + } + + void setSuperclass(Class newSuperclass) { +#if ISA_SIGNING_SIGN_MODE == 
ISA_SIGNING_SIGN_ALL + superclass = (Class)ptrauth_sign_unauthenticated((void *)newSuperclass, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS)); +#else + superclass = newSuperclass; +#endif + } + class_rw_t *data() const { return bits.data(); } @@ -1398,6 +1880,30 @@ struct objc_class : objc_object { void setInstancesRequireRawIsaRecursively(bool inherited = false); void printInstancesRequireRawIsa(bool inherited); +#if CONFIG_USE_PREOPT_CACHES + bool allowsPreoptCaches() const { + return !(bits.data()->flags & RW_NOPREOPT_CACHE); + } + bool allowsPreoptInlinedSels() const { + return !(bits.data()->flags & RW_NOPREOPT_SELS); + } + void setDisallowPreoptCaches() { + bits.data()->setFlags(RW_NOPREOPT_CACHE | RW_NOPREOPT_SELS); + } + void setDisallowPreoptInlinedSels() { + bits.data()->setFlags(RW_NOPREOPT_SELS); + } + void setDisallowPreoptCachesRecursively(const char *why); + void setDisallowPreoptInlinedSelsRecursively(const char *why); +#else + bool allowsPreoptCaches() const { return false; } + bool allowsPreoptInlinedSels() const { return false; } + void setDisallowPreoptCaches() { } + void setDisallowPreoptInlinedSels() { } + void setDisallowPreoptCachesRecursively(const char *why) { } + void setDisallowPreoptInlinedSelsRecursively(const char *why) { } +#endif + bool canAllocNonpointer() { ASSERT(!isFuture()); return !instancesRequireRawIsa(); @@ -1419,6 +1925,28 @@ struct objc_class : objc_object { return bits.isSwiftStable_ButAllowLegacyForNow(); } + uint32_t swiftClassFlags() { + return *(uint32_t *)(&bits + 1); + } + + bool usesSwiftRefcounting() { + if (!isSwiftStable()) return false; + return bool(swiftClassFlags() & 2); //ClassFlags::UsesSwiftRefcounting + } + + bool canCallSwiftRR() { + // !hasCustomCore() is being used as a proxy for isInitialized(). 
All + // classes with Swift refcounting are !hasCustomCore() (unless there are + // category or swizzling shenanigans), but that bit is not set until a + // class is initialized. Checking isInitialized requires an extra + // indirection that we want to avoid on RR fast paths. + // + // In the unlikely event that someone causes a class with Swift + // refcounting to be hasCustomCore(), we'll fall back to sending -retain + // or -release, which is still correct. + return !hasCustomCore() && usesSwiftRefcounting(); + } + bool isStubClass() const { uintptr_t isa = (uintptr_t)isaBits(); return 1 <= isa && isa < 16; @@ -1438,8 +1966,7 @@ struct objc_class : objc_object { // Check the true legacy vs stable distinguisher. // The low bit of Swift's ClassFlags is SET for true legacy // and UNSET for stable pretending to be legacy. - uint32_t swiftClassFlags = *(uint32_t *)(&bits + 1); - bool isActuallySwiftLegacy = bool(swiftClassFlags & 1); + bool isActuallySwiftLegacy = bool(swiftClassFlags() & 1); return !isActuallySwiftLegacy; } @@ -1525,11 +2052,13 @@ struct objc_class : objc_object { // Returns true if this is an unrealized future class. // Locking: To prevent concurrent realization, hold runtimeLock. 
bool isFuture() const { + if (isStubClass()) + return false; return data()->flags & RW_FUTURE; } - bool isMetaClass() { - ASSERT(this); + bool isMetaClass() const { + ASSERT_THIS_NOT_NULL; ASSERT(isRealized()); #if FAST_CACHE_META return cache.getBit(FAST_CACHE_META); @@ -1542,31 +2071,46 @@ struct objc_class : objc_object { bool isMetaClassMaybeUnrealized() { static_assert(offsetof(class_rw_t, flags) == offsetof(class_ro_t, flags), "flags alias"); static_assert(RO_META == RW_META, "flags alias"); + if (isStubClass()) + return false; return data()->flags & RW_META; } // NOT identical to this->ISA when this is a metaclass Class getMeta() { - if (isMetaClass()) return (Class)this; + if (isMetaClassMaybeUnrealized()) return (Class)this; else return this->ISA(); } bool isRootClass() { - return superclass == nil; + return getSuperclass() == nil; } bool isRootMetaclass() { return ISA() == (Class)this; } + + // If this class does not have a name already, we can ask Swift to construct one for us. + const char *installMangledNameForLazilyNamedClass(); + + // Get the class's mangled name, or NULL if the class has a lazy + // name that hasn't been created yet. + const char *nonlazyMangledName() const { + return bits.safe_ro()->getName(); + } const char *mangledName() { // fixme can't assert locks here - ASSERT(this); + ASSERT_THIS_NOT_NULL; - if (isRealized() || isFuture()) { - return data()->ro()->name; - } else { - return ((const class_ro_t *)data())->name; + const char *result = nonlazyMangledName(); + + if (!result) { + // This class lazily instantiates its name. Emplace and + // return it. 
+ result = installMangledNameForLazilyNamedClass(); } + + return result; } const char *demangledName(bool needsLock); @@ -1595,7 +2139,7 @@ struct objc_class : objc_object { return word_align(unalignedInstanceSize()); } - size_t instanceSize(size_t extraBytes) const { + inline size_t instanceSize(size_t extraBytes) const { if (fastpath(cache.hasFastInstanceSize(extraBytes))) { return cache.fastInstanceSize(extraBytes); } @@ -1650,8 +2194,8 @@ struct swift_class_t : objc_class { struct category_t { const char *name; classref_t cls; - struct method_list_t *instanceMethods; - struct method_list_t *classMethods; + WrappedPtr instanceMethods; + WrappedPtr classMethods; struct protocol_list_t *protocols; struct property_list_t *instanceProperties; // Fields below this point are not always present on disk. diff --git a/runtime/objc-runtime-new.mm b/runtime/objc-runtime-new.mm index e158375..df3f9fa 100644 --- a/runtime/objc-runtime-new.mm +++ b/runtime/objc-runtime-new.mm @@ -32,7 +32,6 @@ #include "objc-private.h" #include "objc-runtime-new.h" #include "objc-file.h" -#include "objc-cache.h" #include "objc-zalloc.h" #include #include @@ -46,9 +45,9 @@ static void free_class(Class cls); static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace); static void adjustCustomFlagsForMethodChange(Class cls, method_t *meth); static method_t *search_method_list(const method_list_t *mlist, SEL sel); -static bool method_lists_contains_any(method_list_t * const *mlists, method_list_t * const *end, +template static bool method_lists_contains_any(T *mlists, T *end, SEL sels[], size_t selcount); -static void flushCaches(Class cls); +static void flushCaches(Class cls, const char *func, bool (^predicate)(Class c)); static void initializeTaggedPointerObfuscator(void); #if SUPPORT_FIXUP static void fixupMessageRef(message_ref_t *msg); @@ -151,7 +150,19 @@ uintptr_t objc_indexed_classes_count = 0; asm("\n .globl _objc_absolute_packed_isa_class_mask" \ "\n 
_objc_absolute_packed_isa_class_mask = " STRINGIFY2(ISA_MASK)); -const uintptr_t objc_debug_isa_class_mask = ISA_MASK; +// a better definition is +// (uintptr_t)ptrauth_strip((void *)ISA_MASK, ISA_SIGNING_KEY) +// however we know that PAC uses bits outside of MACH_VM_MAX_ADDRESS +// so approximate the definition here to be constant +template +static constexpr T coveringMask(T n) { + for (T mask = 0; mask != ~T{0}; mask = (mask << 1) | 1) { + if ((n & mask) == n) return mask; + } + return ~T{0}; +} +const uintptr_t objc_debug_isa_class_mask = ISA_MASK & coveringMask(MACH_VM_MAX_ADDRESS - 1); + const uintptr_t objc_debug_isa_magic_mask = ISA_MAGIC_MASK; const uintptr_t objc_debug_isa_magic_value = ISA_MAGIC_VALUE; @@ -212,17 +223,64 @@ static bool didInitialAttachCategories = false; **********************************************************************/ bool didCallDyldNotifyRegister = false; + +/*********************************************************************** +* smallMethodIMPMap +* The map from small method pointers to replacement IMPs. +* +* Locking: runtimeLock must be held when accessing this map. 
+**********************************************************************/ +namespace objc { + static objc::LazyInitDenseMap smallMethodIMPMap; +} + +static IMP method_t_remappedImp_nolock(const method_t *m) { + runtimeLock.assertLocked(); + auto *map = objc::smallMethodIMPMap.get(false); + if (!map) + return nullptr; + auto iter = map->find(m); + if (iter == map->end()) + return nullptr; + return iter->second; +} + +IMP method_t::remappedImp(bool needsLock) const { + ASSERT(isSmall()); + if (needsLock) { + mutex_locker_t guard(runtimeLock); + return method_t_remappedImp_nolock(this); + } else { + return method_t_remappedImp_nolock(this); + } +} + +void method_t::remapImp(IMP imp) { + ASSERT(isSmall()); + runtimeLock.assertLocked(); + auto *map = objc::smallMethodIMPMap.get(true); + (*map)[this] = imp; +} + +objc_method_description *method_t::getSmallDescription() const { + static objc::LazyInitDenseMap map; + + mutex_locker_t guard(runtimeLock); + + auto &ptr = (*map.get(true))[this]; + if (!ptr) { + ptr = (objc_method_description *)malloc(sizeof *ptr); + ptr->name = name(); + ptr->types = (char *)types(); + } + return ptr; +} + /* Low two bits of mlist->entsize is used as the fixed-up marker. - PREOPTIMIZED VERSION: Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted). (Protocol method lists are not sorted because of their extra parallel data) Runtime fixed-up method lists get 3. - UN-PREOPTIMIZED VERSION: - Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted) - Shared cache's sorting and uniquing are not trusted, but do affect the - location of the selector name string. - Runtime fixed-up method lists get 2. High two bits of protocol->flags is used as the fixed-up marker. PREOPTIMIZED VERSION: @@ -234,18 +292,14 @@ bool didCallDyldNotifyRegister = false; Runtime fixed-up protocols get 3<<30. 
*/ -static uint32_t fixed_up_method_list = 3; -static uint32_t uniqued_method_list = 1; +static const uint32_t fixed_up_method_list = 3; +static const uint32_t uniqued_method_list = 1; static uint32_t fixed_up_protocol = PROTOCOL_FIXED_UP_1; static uint32_t canonical_protocol = PROTOCOL_IS_CANONICAL; void disableSharedCacheOptimizations(void) { - fixed_up_method_list = 2; - // It is safe to set uniqued method lists to 0 as we'll never call it unless - // the method list was already in need of being fixed up - uniqued_method_list = 0; fixed_up_protocol = PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2; // Its safe to just set canonical protocol to 0 as we'll never call // clearIsCanonical() unless isCanonical() returned true, which can't happen @@ -258,7 +312,8 @@ bool method_list_t::isUniqued() const { } bool method_list_t::isFixedUp() const { - return flags() == fixed_up_method_list; + // Ignore any flags in the top bits, just look at the bottom two. + return (flags() & 0x3) == fixed_up_method_list; } void method_list_t::setFixedUp() { @@ -288,11 +343,11 @@ void protocol_t::clearIsCanonical() { } -method_list_t * const *method_array_t::endCategoryMethodLists(Class cls) const +const method_list_t_authed_ptr *method_array_t::endCategoryMethodLists(Class cls) const { auto mlists = beginLists(); auto mlistsEnd = endLists(); - + if (mlists == mlistsEnd || !cls->data()->ro()->baseMethods()) { // No methods, or no base methods. 
@@ -383,8 +438,7 @@ void *object_getIndexedIvars(id obj) { uint8_t *base = (uint8_t *)obj; - if (!obj) return nil; - if (obj->isTaggedPointer()) return nil; + if (obj->isTaggedPointerOrNil()) return nil; if (!obj->isClass()) return base + obj->ISA()->alignedInstanceSize(); @@ -536,7 +590,7 @@ printReplacements(Class cls, const locstamped_category_t *cats_list, uint32_t ca if (!mlist) continue; for (const auto& meth : *mlist) { - SEL s = sel_registerName(sel_cname(meth.name)); + SEL s = sel_registerName(sel_cname(meth.name())); // Search for replaced methods in method lookup order. // Complain about the first duplicate only. @@ -549,11 +603,11 @@ printReplacements(Class cls, const locstamped_category_t *cats_list, uint32_t ca if (!mlist2) continue; for (const auto& meth2 : *mlist2) { - SEL s2 = sel_registerName(sel_cname(meth2.name)); + SEL s2 = sel_registerName(sel_cname(meth2.name())); if (s == s2) { logReplacedMethod(cls->nameForLogging(), s, cls->isMetaClass(), cat->name, - meth2.imp, meth.imp); + meth2.imp(false), meth.imp(false)); goto complained; } } @@ -561,11 +615,11 @@ printReplacements(Class cls, const locstamped_category_t *cats_list, uint32_t ca // Look for method in cls for (const auto& meth2 : cls->data()->methods()) { - SEL s2 = sel_registerName(sel_cname(meth2.name)); + SEL s2 = sel_registerName(sel_cname(meth2.name())); if (s == s2) { logReplacedMethod(cls->nameForLogging(), s, cls->isMetaClass(), cat->name, - meth2.imp, meth.imp); + meth2.imp(false), meth.imp(false)); goto complained; } } @@ -628,7 +682,7 @@ foreach_realized_class_and_subclass_2(Class top, unsigned &count, cls = cls->data()->firstSubclass; } else { while (!cls->data()->nextSiblingClass && cls != top) { - cls = cls->superclass; + cls = cls->getSuperclass(); if (--count == 0) { _objc_fatal("Memory corruption in class list."); } @@ -798,20 +852,20 @@ class Mixin { static void scanAddedClassImpl(Class cls, bool isMeta) { - Class NSOClass = (isMeta ? 
metaclassNSObject() : classNSObject()); bool setCustom = NO, inherited = NO; if (isNSObjectSwizzled(isMeta)) { setCustom = YES; - } else if (cls == NSOClass) { - // NSObject is default but we need to check categories + } else if (Traits::knownClassHasDefaultImpl(cls, isMeta)) { + // This class is known to have the default implementations, + // but we need to check categories. auto &methods = as_objc_class(cls)->data()->methods(); setCustom = Traits::scanMethodLists(methods.beginCategoryMethodLists(), methods.endCategoryMethodLists(cls)); - } else if (!isMeta && !as_objc_class(cls)->superclass) { + } else if (!isMeta && !as_objc_class(cls)->getSuperclass()) { // Custom Root class setCustom = YES; - } else if (Traits::isCustom(as_objc_class(cls)->superclass)) { + } else if (Traits::isCustom(as_objc_class(cls)->getSuperclass())) { // Superclass is custom, therefore we are too. setCustom = YES; inherited = YES; @@ -829,6 +883,14 @@ class Mixin { } public: + static bool knownClassHasDefaultImpl(Class cls, bool isMeta) { + // Typically only NSObject has default implementations. + // Allow this to be extended by overriding (to allow + // SwiftObject, for example). + Class NSOClass = (isMeta ? metaclassNSObject() : classNSObject()); + return cls == NSOClass; + } + // Scan a class that is about to be marked Initialized for particular // bundles of selectors, and mark the class and its children // accordingly. 
@@ -892,7 +954,7 @@ public: static void scanChangedMethod(Class cls, const method_t *meth) { - if (fastpath(!Traits::isInterestingSelector(meth->name))) { + if (fastpath(!Traits::isInterestingSelector(meth->name()))) { return; } @@ -938,7 +1000,8 @@ struct AWZScanner : scanner::Mixin + static bool scanMethodLists(T *mlists, T *end) { SEL sels[2] = { @selector(alloc), @selector(allocWithZone:), }; return method_lists_contains_any(mlists, end, sels, 2); } @@ -972,7 +1035,8 @@ struct RRScanner : scanner::Mixin + static bool scanMethodLists(T *mlists, T *end) { SEL sels[8] = { @selector(retain), @selector(release), @@ -991,6 +1055,16 @@ struct RRScanner : scanner::Mixin { + static bool knownClassHasDefaultImpl(Class cls, bool isMeta) { + if (scanner::Mixin::knownClassHasDefaultImpl(cls, isMeta)) + return true; + if ((cls->isRootClass() || cls->isRootMetaclass()) + && strcmp(cls->mangledName(), "_TtCs12_SwiftObject") == 0) + return true; + + return false; + } + static bool isCustom(Class cls) { return cls->hasCustomCore(); } @@ -1007,7 +1081,8 @@ struct CoreScanner : scanner::Mixin { sel == @selector(isKindOfClass:) || sel == @selector(respondsToSelector:); } - static bool scanMethodLists(method_list_t * const *mlists, method_list_t * const *end) { + template + static bool scanMethodLists(T *mlists, T *end) { SEL sels[5] = { @selector(new), @selector(self), @@ -1114,7 +1189,7 @@ public: if (slowpath(PrintConnecting)) { _objc_inform("CLASS: found category %c%s(%s)", - cls->isMetaClass() ? '+' : '-', + cls->isMetaClassMaybeUnrealized() ? '+' : '-', cls->nameForLogging(), lc.cat->name); } @@ -1193,25 +1268,31 @@ fixupMethodList(method_list_t *mlist, bool bundleCopy, bool sort) // Unique selectors in list. for (auto& meth : *mlist) { - const char *name = sel_cname(meth.name); - meth.name = sel_registerNameNoLock(name, bundleCopy); + const char *name = sel_cname(meth.name()); + meth.setName(sel_registerNameNoLock(name, bundleCopy)); } } // Sort by selector address. 
- if (sort) { + // Don't try to sort small lists, as they're immutable. + // Don't try to sort big lists of nonstandard size, as stable_sort + // won't copy the entries properly. + if (sort && !mlist->isSmallList() && mlist->entsize() == method_t::bigSize) { method_t::SortBySELAddress sorter; - std::stable_sort(mlist->begin(), mlist->end(), sorter); + std::stable_sort(&mlist->begin()->big(), &mlist->end()->big(), sorter); } - // Mark method list as uniqued and sorted - mlist->setFixedUp(); + // Mark method list as uniqued and sorted. + // Can't mark small lists, since they're immutable. + if (!mlist->isSmallList()) { + mlist->setFixedUp(); + } } static void prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount, - bool baseMethods, bool methodsFromBundle) + bool baseMethods, bool methodsFromBundle, const char *why) { runtimeLock.assertLocked(); @@ -1223,6 +1304,16 @@ prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount, // Therefore we need not handle any special cases here. if (baseMethods) { ASSERT(cls->hasCustomAWZ() && cls->hasCustomRR() && cls->hasCustomCore()); + } else if (cls->cache.isConstantOptimizedCache()) { + cls->setDisallowPreoptCachesRecursively(why); + } else if (cls->allowsPreoptInlinedSels()) { +#if CONFIG_USE_PREOPT_CACHES + SEL *sels = (SEL *)objc_opt_offsets[OBJC_OPT_INLINED_METHODS_START]; + SEL *sels_end = (SEL *)objc_opt_offsets[OBJC_OPT_INLINED_METHODS_END]; + if (method_lists_contains_any(addedLists, addedLists + addedCount, sels, sels_end - sels)) { + cls->setDisallowPreoptInlinedSelsRecursively(why); + } +#endif } // Add method lists to array. 
@@ -1327,7 +1418,7 @@ attachCategories(Class cls, const locstamped_category_t *cats_list, uint32_t cat method_list_t *mlist = entry.cat->methodsForMeta(isMeta); if (mlist) { if (mcount == ATTACH_BUFSIZ) { - prepareMethodLists(cls, mlists, mcount, NO, fromBundle); + prepareMethodLists(cls, mlists, mcount, NO, fromBundle, __func__); rwe->methods.attachLists(mlists, mcount); mcount = 0; } @@ -1356,9 +1447,16 @@ attachCategories(Class cls, const locstamped_category_t *cats_list, uint32_t cat } if (mcount > 0) { - prepareMethodLists(cls, mlists + ATTACH_BUFSIZ - mcount, mcount, NO, fromBundle); + prepareMethodLists(cls, mlists + ATTACH_BUFSIZ - mcount, mcount, + NO, fromBundle, __func__); rwe->methods.attachLists(mlists + ATTACH_BUFSIZ - mcount, mcount); - if (flags & ATTACH_EXISTING) flushCaches(cls); + if (flags & ATTACH_EXISTING) { + flushCaches(cls, __func__, [](Class c){ + // constant caches have been dealt with in prepareMethodLists + // if the class still is constant here, it's fine to keep + return !c->cache.isConstantOptimizedCache(); + }); + } } rwe->properties.attachLists(proplists + ATTACH_BUFSIZ - propcount, propcount); @@ -1391,7 +1489,7 @@ static void methodizeClass(Class cls, Class previously) // Install methods and properties that the class implements itself. method_list_t *list = ro->baseMethods(); if (list) { - prepareMethodLists(cls, &list, 1, YES, isBundleClass(cls)); + prepareMethodLists(cls, &list, 1, YES, isBundleClass(cls), nullptr); if (rwe) rwe->methods.attachLists(&list, 1); } @@ -1433,9 +1531,9 @@ static void methodizeClass(Class cls, Class previously) for (const auto& meth : rw->methods()) { if (PrintConnecting) { _objc_inform("METHOD %c[%s %s]", isMeta ? 
'+' : '-', - cls->nameForLogging(), sel_getName(meth.name)); + cls->nameForLogging(), sel_getName(meth.name())); } - ASSERT(sel_registerName(sel_getName(meth.name)) == meth.name); + ASSERT(sel_registerName(sel_getName(meth.name())) == meth.name()); } #endif } @@ -1624,6 +1722,8 @@ static char *copySwiftV1MangledName(const char *string, bool isProtocol = false) // This is a misnomer: gdb_objc_realized_classes is actually a list of // named classes not in the dyld shared cache, whether realized or not. +// This list excludes lazily named classes, which have to be looked up +// using a getClass hook. NXMapTable *gdb_objc_realized_classes; // exported for debuggers in objc-gdb.h uintptr_t objc_debug_realized_class_generation_count; @@ -1754,7 +1854,7 @@ static void addFutureNamedClass(const char *name, Class cls) class_rw_t *rw = objc::zalloc(); class_ro_t *ro = (class_ro_t *)calloc(sizeof(class_ro_t), 1); - ro->name = strdupIfMutable(name); + ro->name.store(strdupIfMutable(name), std::memory_order_relaxed); rw->set_ro(ro); cls->setData(rw); cls->data()->flags = RO_FUTURE; @@ -1946,7 +2046,7 @@ static Class getMaybeUnrealizedNonMetaClass(Class metacls, id inst) // special case for root metaclass // where inst == inst->ISA() == metacls is possible if (metacls->ISA() == metacls) { - Class cls = metacls->superclass; + Class cls = metacls->getSuperclass(); ASSERT(cls->isRealized()); ASSERT(!cls->isMetaClass()); ASSERT(cls->ISA() == metacls); @@ -1964,7 +2064,7 @@ static Class getMaybeUnrealizedNonMetaClass(Class metacls, id inst) ASSERT(!cls->isMetaClassMaybeUnrealized()); return cls; } - cls = cls->superclass; + cls = cls->getSuperclass(); } #if DEBUG _objc_fatal("cls is not an instance of metacls"); @@ -1973,6 +2073,10 @@ static Class getMaybeUnrealizedNonMetaClass(Class metacls, id inst) #endif } + // See if the metaclass has a pointer to its nonmetaclass. 
+ if (Class cls = metacls->bits.safe_ro()->getNonMetaclass()) + return cls; + // try name lookup { Class cls = getClassExceptSomeSwift(metacls->mangledName()); @@ -2197,9 +2301,15 @@ static void addSubclass(Class supercls, Class subcls) objc::RRScanner::scanAddedSubClass(subcls, supercls); objc::CoreScanner::scanAddedSubClass(subcls, supercls); + if (!supercls->allowsPreoptCaches()) { + subcls->setDisallowPreoptCachesRecursively(__func__); + } else if (!supercls->allowsPreoptInlinedSels()) { + subcls->setDisallowPreoptInlinedSelsRecursively(__func__); + } + // Special case: instancesRequireRawIsa does not propagate // from root class to root metaclass - if (supercls->instancesRequireRawIsa() && supercls->superclass) { + if (supercls->instancesRequireRawIsa() && supercls->getSuperclass()) { subcls->setInstancesRequireRawIsaRecursively(true); } } @@ -2216,7 +2326,7 @@ static void removeSubclass(Class supercls, Class subcls) runtimeLock.assertLocked(); ASSERT(supercls->isRealized()); ASSERT(subcls->isRealized()); - ASSERT(subcls->superclass == supercls); + ASSERT(subcls->getSuperclass() == supercls); objc_debug_realized_class_generation_count++; @@ -2263,23 +2373,23 @@ static NEVER_INLINE Protocol *getProtocol(const char *name) Protocol *result = (Protocol *)NXMapGet(protocols(), name); if (result) return result; + // Try table from dyld3 closure and dyld shared cache + result = getPreoptimizedProtocol(name); + if (result) return result; + // Try Swift-mangled equivalent of the given name. if (char *swName = copySwiftV1MangledName(name, true/*isProtocol*/)) { result = (Protocol *)NXMapGet(protocols(), swName); + + // Try table from dyld3 closure and dyld shared cache + if (!result) + result = getPreoptimizedProtocol(swName); + free(swName); - if (result) return result; + return result; } - // Try table from dyld shared cache - // Temporarily check that we are using the new table. Eventually this check - // will always be true. 
- // FIXME: Remove this check when we can - if (sharedCacheSupportsProtocolRoots()) { - result = getPreoptimizedProtocol(name); - if (result) return result; - } - - return nil; + return nullptr; } @@ -2468,10 +2578,22 @@ static void reconcileInstanceVariables(Class cls, Class supercls, const class_ro class_ro_t *ro_w = make_ro_writeable(rw); ro = rw->ro(); moveIvars(ro_w, super_ro->instanceSize); - gdb_objc_class_changed(cls, OBJC_CLASS_IVARS_CHANGED, ro->name); + gdb_objc_class_changed(cls, OBJC_CLASS_IVARS_CHANGED, ro->getName()); } } +static void validateAlreadyRealizedClass(Class cls) { + ASSERT(cls->isRealized()); +#if TARGET_OS_OSX + class_rw_t *rw = cls->data(); + size_t rwSize = malloc_size(rw); + + // Note: this check will need some adjustment if class_rw_t's + // size changes to not match the malloc bucket. + if (rwSize != sizeof(class_rw_t)) + _objc_fatal("realized class %p has corrupt data pointer %p", cls, rw); +#endif +} /*********************************************************************** * realizeClassWithoutSwift @@ -2490,7 +2612,10 @@ static Class realizeClassWithoutSwift(Class cls, Class previously) Class metacls; if (!cls) return nil; - if (cls->isRealized()) return cls; + if (cls->isRealized()) { + validateAlreadyRealizedClass(cls); + return cls; + } ASSERT(cls == remapClass(cls)); // fixme verify class is not in an un-dlopened part of the shared cache? @@ -2511,6 +2636,8 @@ static Class realizeClassWithoutSwift(Class cls, Class previously) cls->setData(rw); } + cls->cache.initializeToEmptyOrPreoptimizedInDisguise(); + #if FAST_CACHE_META if (isMeta) cls->cache.setBit(FAST_CACHE_META); #endif @@ -2534,7 +2661,7 @@ static Class realizeClassWithoutSwift(Class cls, Class previously) // or that Swift's initializers have already been called. // fixme that assumption will be wrong if we add support // for ObjC subclasses of Swift classes. 
- supercls = realizeClassWithoutSwift(remapClass(cls->superclass), nil); + supercls = realizeClassWithoutSwift(remapClass(cls->getSuperclass()), nil); metacls = realizeClassWithoutSwift(remapClass(cls->ISA()), nil); #if SUPPORT_NONPOINTER_ISA @@ -2553,13 +2680,13 @@ static Class realizeClassWithoutSwift(Class cls, Class previously) // Non-pointer isa disabled by environment or app SDK version instancesRequireRawIsa = true; } - else if (!hackedDispatch && 0 == strcmp(ro->name, "OS_object")) + else if (!hackedDispatch && 0 == strcmp(ro->getName(), "OS_object")) { // hack for libdispatch et al - isa also acts as vtable pointer hackedDispatch = true; instancesRequireRawIsa = true; } - else if (supercls && supercls->superclass && + else if (supercls && supercls->getSuperclass() && supercls->instancesRequireRawIsa()) { // This is also propagated by addSubclass() @@ -2578,7 +2705,7 @@ static Class realizeClassWithoutSwift(Class cls, Class previously) #endif // Update superclass and metaclass in case of remapping - cls->superclass = supercls; + cls->setSuperclass(supercls); cls->initClassIsa(metacls); // Reconcile instance variable offsets / layout. @@ -2700,7 +2827,7 @@ static Class realizeSwiftClass(Class cls) ASSERT(remapClass(cls) == cls); ASSERT(cls->isSwiftStable_ButAllowLegacyForNow()); ASSERT(!cls->isMetaClassMaybeUnrealized()); - ASSERT(cls->superclass); + ASSERT(cls->getSuperclass()); runtimeLock.unlock(); #endif @@ -2792,13 +2919,13 @@ missingWeakSuperclass(Class cls) { ASSERT(!cls->isRealized()); - if (!cls->superclass) { + if (!cls->getSuperclass()) { // superclass nil. This is normal for root classes only. return (!(cls->data()->flags & RO_ROOT)); } else { // superclass not nil. Check if a higher superclass is missing. 
- Class supercls = remapClass(cls->superclass); - ASSERT(cls != cls->superclass); + Class supercls = remapClass(cls->getSuperclass()); + ASSERT(cls != cls->getSuperclass()); ASSERT(cls != supercls); if (!supercls) return YES; if (supercls->isRealized()) return NO; @@ -2917,6 +3044,10 @@ BOOL _class_isFutureClass(Class cls) return cls && cls->isFuture(); } +BOOL _class_isSwift(Class _Nullable cls) +{ + return cls && cls->isSwiftStable(); +} /*********************************************************************** * _objc_flush_caches @@ -2925,24 +3056,25 @@ BOOL _class_isFutureClass(Class cls) * and subclasses thereof. Nil flushes all classes.) * Locking: acquires runtimeLock **********************************************************************/ -static void flushCaches(Class cls) +static void flushCaches(Class cls, const char *func, bool (^predicate)(Class)) { runtimeLock.assertLocked(); #if CONFIG_USE_CACHE_LOCK mutex_locker_t lock(cacheUpdateLock); #endif + const auto handler = ^(Class c) { + if (predicate(c)) { + c->cache.eraseNolock(func); + } + + return true; + }; + if (cls) { - foreach_realized_class_and_subclass(cls, [](Class c){ - cache_erase_nolock(c); - return true; - }); - } - else { - foreach_realized_class_and_metaclass([](Class c){ - cache_erase_nolock(c); - return true; - }); + foreach_realized_class_and_subclass(cls, handler); + } else { + foreach_realized_class_and_metaclass(handler); } } @@ -2951,9 +3083,13 @@ void _objc_flush_caches(Class cls) { { mutex_locker_t lock(runtimeLock); - flushCaches(cls); - if (cls && cls->superclass && cls != cls->getIsa()) { - flushCaches(cls->getIsa()); + flushCaches(cls, __func__, [](Class c){ + return !c->cache.isConstantOptimizedCache(); + }); + if (cls && !cls->isMetaClass() && !cls->isRootClass()) { + flushCaches(cls->ISA(), __func__, [](Class c){ + return !c->cache.isConstantOptimizedCache(); + }); } else { // cls is a root class or root metaclass. 
Its metaclass is itself // or a subclass so the metaclass caches were already flushed. @@ -2967,7 +3103,7 @@ void _objc_flush_caches(Class cls) #else mutex_locker_t lock(runtimeLock); #endif - cache_collect(true); + cache_t::collectNolock(true); } } @@ -3053,8 +3189,8 @@ static void load_categories_nolock(header_info *hi) { } }; - processCatlist(_getObjc2CategoryList(hi, &count)); - processCatlist(_getObjc2CategoryList2(hi, &count)); + processCatlist(hi->catlist(&count)); + processCatlist(hi->catlist2(&count)); } static void loadAllCategories() { @@ -3188,7 +3324,7 @@ bool mustReadClasses(header_info *hi, bool hasDyldRoots) **********************************************************************/ Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized) { - const char *mangledName = cls->mangledName(); + const char *mangledName = cls->nonlazyMangledName(); if (missingWeakSuperclass(cls)) { // No superclass (probably weak-linked). @@ -3199,45 +3335,60 @@ Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized) cls->nameForLogging()); } addRemappedClass(cls, nil); - cls->superclass = nil; + cls->setSuperclass(nil); return nil; } cls->fixupBackwardDeployingStableSwift(); Class replacing = nil; - if (Class newCls = popFutureNamedClass(mangledName)) { - // This name was previously allocated as a future class. - // Copy objc_class to future class's struct. - // Preserve future's rw data block. - - if (newCls->isAnySwift()) { - _objc_fatal("Can't complete future class request for '%s' " - "because the real class is too big.", - cls->nameForLogging()); + if (mangledName != nullptr) { + if (Class newCls = popFutureNamedClass(mangledName)) { + // This name was previously allocated as a future class. + // Copy objc_class to future class's struct. + // Preserve future's rw data block. 
+ + if (newCls->isAnySwift()) { + _objc_fatal("Can't complete future class request for '%s' " + "because the real class is too big.", + cls->nameForLogging()); + } + + class_rw_t *rw = newCls->data(); + const class_ro_t *old_ro = rw->ro(); + memcpy(newCls, cls, sizeof(objc_class)); + + // Manually set address-discriminated ptrauthed fields + // so that newCls gets the correct signatures. + newCls->setSuperclass(cls->getSuperclass()); + newCls->initIsa(cls->getIsa()); + + rw->set_ro((class_ro_t *)newCls->data()); + newCls->setData(rw); + freeIfMutable((char *)old_ro->getName()); + free((void *)old_ro); + + addRemappedClass(cls, newCls); + + replacing = cls; + cls = newCls; } - - class_rw_t *rw = newCls->data(); - const class_ro_t *old_ro = rw->ro(); - memcpy(newCls, cls, sizeof(objc_class)); - rw->set_ro((class_ro_t *)newCls->data()); - newCls->setData(rw); - freeIfMutable((char *)old_ro->name); - free((void *)old_ro); - - addRemappedClass(cls, newCls); - - replacing = cls; - cls = newCls; } if (headerIsPreoptimized && !replacing) { // class list built in shared cache // fixme strict assert doesn't work because of duplicates // ASSERT(cls == getClass(name)); - ASSERT(getClassExceptSomeSwift(mangledName)); + ASSERT(mangledName == nullptr || getClassExceptSomeSwift(mangledName)); } else { - addNamedClass(cls, mangledName, replacing); + if (mangledName) { //some Swift generic classes can lazily generate their names + addNamedClass(cls, mangledName, replacing); + } else { + Class meta = cls->ISA(); + const class_ro_t *metaRO = meta->bits.safe_ro(); + ASSERT(metaRO->getNonMetaclass() && "Metaclass with lazy name must have a pointer to the corresponding nonmetaclass."); + ASSERT(metaRO->getNonMetaclass() == cls && "Metaclass nonmetaclass pointer must equal the original class."); + } addClassTableEntry(cls); } @@ -3326,9 +3477,8 @@ readProtocol(protocol_t *newproto, Class protocol_class, } } } - else if (newproto->size >= sizeof(protocol_t)) { - // New protocol from an 
un-preoptimized image - // with sufficient storage. Fix it up in place. + else { + // New protocol from an un-preoptimized image. Fix it up in place. // fixme duplicate protocols from unloadable bundle newproto->initIsa(protocol_class); // fixme pinned insertFn(protocol_map, newproto->mangledName, newproto); @@ -3337,26 +3487,6 @@ readProtocol(protocol_t *newproto, Class protocol_class, newproto, newproto->nameForLogging()); } } - else { - // New protocol from an un-preoptimized image - // with insufficient storage. Reallocate it. - // fixme duplicate protocols from unloadable bundle - size_t size = max(sizeof(protocol_t), (size_t)newproto->size); - protocol_t *installedproto = (protocol_t *)calloc(size, 1); - memcpy(installedproto, newproto, newproto->size); - installedproto->size = (typeof(installedproto->size))size; - - installedproto->initIsa(protocol_class); // fixme pinned - insertFn(protocol_map, installedproto->mangledName, installedproto); - if (PrintProtocols) { - _objc_inform("PROTOCOLS: protocol at %p is %s ", - installedproto, installedproto->nameForLogging()); - _objc_inform("PROTOCOLS: protocol at %p is %s " - "(reallocated to %p)", - newproto, installedproto->nameForLogging(), - installedproto); - } - } } /*********************************************************************** @@ -3414,12 +3544,11 @@ void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int un # if TARGET_OS_OSX // Disable non-pointer isa if the app is too old // (linked before OS X 10.11) - if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_11) { + if (!dyld_program_sdk_at_least(dyld_platform_version_macOS_10_11)) { DisableNonpointerIsa = true; if (PrintRawIsa) { _objc_inform("RAW ISA: disabling non-pointer isa because " - "the app is too old (SDK version " SDK_FORMAT ")", - FORMAT_SDK(dyld_get_program_sdk_version())); + "the app is too old."); } } @@ -3554,7 +3683,6 @@ void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int un 
ts.log("IMAGE TIMES: fix up objc_msgSend_fixup"); #endif - bool cacheSupportsProtocolRoots = sharedCacheSupportsProtocolRoots(); // Discover protocols. Fix up protocol refs. for (EACH_HEADER) { @@ -3570,7 +3698,7 @@ void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int un // in the shared cache is marked with isCanonical() and that may not // be true if some non-shared cache binary was chosen as the canonical // definition - if (launchTime && isPreoptimized && cacheSupportsProtocolRoots) { + if (launchTime && isPreoptimized) { if (PrintProtocols) { _objc_inform("PROTOCOLS: Skipping reading protocols in image: %s", hi->fname()); @@ -3597,7 +3725,7 @@ void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int un // shared cache definition of a protocol. We can skip the check on // launch, but have to visit @protocol refs for shared cache images // loaded later. - if (launchTime && cacheSupportsProtocolRoots && hi->isPreoptimized()) + if (launchTime && hi->isPreoptimized()) continue; protocol_t **protolist = _getObjc2ProtocolRefs(hi, &count); for (i = 0; i < count; i++) { @@ -3627,8 +3755,7 @@ void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int un // Realize non-lazy classes (for +load methods and static instances) for (EACH_HEADER) { - classref_t const *classlist = - _getObjc2NonlazyClassList(hi, &count); + classref_t const *classlist = hi->nlclslist(&count); for (i = 0; i < count; i++) { Class cls = remapClass(classlist[i]); if (!cls) continue; @@ -3699,13 +3826,13 @@ void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int un } const method_list_t *mlist; - if ((mlist = ((class_ro_t *)cls->data())->baseMethods())) { + if ((mlist = cls->bits.safe_ro()->baseMethods())) { PreoptTotalMethodLists++; if (mlist->isFixedUp()) { PreoptOptimizedMethodLists++; } } - if ((mlist=((class_ro_t *)cls->ISA()->data())->baseMethods())) { + if ((mlist = cls->ISA()->bits.safe_ro()->baseMethods())) 
{ PreoptTotalMethodLists++; if (mlist->isFixedUp()) { PreoptOptimizedMethodLists++; @@ -3749,7 +3876,7 @@ static void schedule_class_load(Class cls) if (cls->data()->flags & RW_LOADED) return; // Ensure superclass-first ordering - schedule_class_load(cls->superclass); + schedule_class_load(cls->getSuperclass()); add_class_to_loadable_list(cls); cls->setInfo(RW_LOADED); @@ -3808,7 +3935,7 @@ void _unload_image(header_info *hi) // Ignore __objc_catlist2. We don't support unloading Swift // and we never will. - category_t * const *catlist = _getObjc2CategoryList(hi, &count); + category_t * const *catlist = hi->catlist(&count); for (i = 0; i < count; i++) { category_t *cat = catlist[i]; Class cls = remapClass(cat->cls); @@ -3838,7 +3965,7 @@ void _unload_image(header_info *hi) if (cls) classes.insert(cls); } - classlist = _getObjc2NonlazyClassList(hi, &count); + classlist = hi->nlclslist(&count); for (i = 0; i < count; i++) { Class cls = remapClass(classlist[i]); if (cls) classes.insert(cls); @@ -3873,14 +4000,19 @@ struct objc_method_description * method_getDescription(Method m) { if (!m) return nil; - return (struct objc_method_description *)m; + return m->getDescription(); } IMP method_getImplementation(Method m) { - return m ? m->imp : nil; + return m ? 
m->imp(true) : nil; +} + +IMPAndSEL _method_getImplementationAndName(Method m) +{ + return { m->imp(true), m->name() }; } @@ -3896,8 +4028,8 @@ method_getName(Method m) { if (!m) return nil; - ASSERT(m->name == sel_registerName(sel_getName(m->name))); - return m->name; + ASSERT(m->name() == sel_registerName(sel_getName(m->name()))); + return m->name(); } @@ -3911,7 +4043,7 @@ const char * method_getTypeEncoding(Method m) { if (!m) return nil; - return m->types; + return m->types(); } @@ -3928,14 +4060,18 @@ _method_setImplementation(Class cls, method_t *m, IMP imp) if (!m) return nil; if (!imp) return nil; - IMP old = m->imp; - m->imp = imp; + IMP old = m->imp(false); + SEL sel = m->name(); + + m->setImp(imp); // Cache updates are slow if cls is nil (i.e. unknown) // RR/AWZ updates are slow if cls is nil (i.e. unknown) // fixme build list of classes whose Methods are known externally? - flushCaches(cls); + flushCaches(cls, __func__, [sel, old](Class c){ + return c->cache.shouldFlush(sel, old); + }); adjustCustomFlagsForMethodChange(cls, m); @@ -3951,6 +4087,12 @@ method_setImplementation(Method m, IMP imp) return _method_setImplementation(Nil, m, imp); } +extern void _method_setImplementationRawUnsafe(Method m, IMP imp) +{ + mutex_locker_t lock(runtimeLock); + m->setImp(imp); +} + void method_exchangeImplementations(Method m1, Method m2) { @@ -3958,16 +4100,22 @@ void method_exchangeImplementations(Method m1, Method m2) mutex_locker_t lock(runtimeLock); - IMP m1_imp = m1->imp; - m1->imp = m2->imp; - m2->imp = m1_imp; + IMP imp1 = m1->imp(false); + IMP imp2 = m2->imp(false); + SEL sel1 = m1->name(); + SEL sel2 = m2->name(); + + m1->setImp(imp2); + m2->setImp(imp1); // RR/AWZ updates are slow because class is unknown // Cache updates are slow because class is unknown // fixme build list of classes whose Methods are known externally? 
- flushCaches(nil); + flushCaches(nil, __func__, [sel1, sel2, imp1, imp2](Class c){ + return c->cache.shouldFlush(sel1, imp1) || c->cache.shouldFlush(sel2, imp2); + }); adjustCustomFlagsForMethodChange(nil, m1); adjustCustomFlagsForMethodChange(nil, m2); @@ -4121,7 +4269,7 @@ fixupProtocolMethodList(protocol_t *proto, method_list_t *mlist, fixupMethodList(mlist, true/*always copy for simplicity*/, !extTypes/*sort if no extended method types*/); - if (extTypes) { + if (extTypes && !mlist->isSmallList()) { // Sort method list and extended method types together. // fixupMethodList() can't do this. // fixme COW stomp @@ -4132,8 +4280,8 @@ fixupProtocolMethodList(protocol_t *proto, method_list_t *mlist, required, instance, prefix, junk); for (uint32_t i = 0; i < count; i++) { for (uint32_t j = i+1; j < count; j++) { - method_t& mi = mlist->get(i); - method_t& mj = mlist->get(j); + auto& mi = mlist->get(i).big(); + auto& mj = mlist->get(j).big(); if (mi.name > mj.name) { std::swap(mi, mj); std::swap(extTypes[prefix+i], extTypes[prefix+j]); @@ -4334,7 +4482,8 @@ _protocol_getMethodTypeEncoding(Protocol *proto_gen, SEL sel, const char * protocol_t::demangledName() { - ASSERT(hasDemangledNameField()); + if (!hasDemangledNameField()) + return mangledName; if (! _demangledName) { char *de = copySwiftV1DemangledName(mangledName, true/*isProtocol*/); @@ -4372,7 +4521,9 @@ protocol_getMethodDescription(Protocol *p, SEL aSel, Method m = protocol_getMethod(newprotocol(p), aSel, isRequiredMethod, isInstanceMethod, true); - if (m) return *method_getDescription(m); + // method_getDescription is inefficient for small methods. Don't bother + // trying to use it, just make our own. 
+ if (m) return (struct objc_method_description){m->name(), (char *)m->types()}; else return (struct objc_method_description){nil, nil}; } @@ -4477,8 +4628,8 @@ protocol_copyMethodDescriptionList(Protocol *p, result = (struct objc_method_description *) calloc(mlist->count + 1, sizeof(struct objc_method_description)); for (const auto& meth : *mlist) { - result[count].name = meth.name; - result[count].types = (char *)meth.types; + result[count].name = meth.name(); + result[count].types = (char *)meth.types(); count++; } } @@ -4763,15 +4914,15 @@ static void protocol_addMethod_nolock(method_list_t*& list, SEL name, const char *types) { if (!list) { - list = (method_list_t *)calloc(sizeof(method_list_t), 1); - list->entsizeAndFlags = sizeof(list->first); + list = (method_list_t *)calloc(method_list_t::byteSize(sizeof(struct method_t::big), 1), 1); + list->entsizeAndFlags = sizeof(struct method_t::big); list->setFixedUp(); } else { size_t size = list->byteSize() + list->entsize(); list = (method_list_t *)realloc(list, size); } - method_t& meth = list->get(list->count++); + auto &meth = list->get(list->count++).big(); meth.name = name; meth.types = types ? 
strdupIfMutable(types) : ""; meth.imp = nil; @@ -4819,15 +4970,15 @@ protocol_addProperty_nolock(property_list_t *&plist, const char *name, unsigned int count) { if (!plist) { - plist = (property_list_t *)calloc(sizeof(property_list_t), 1); + plist = (property_list_t *)calloc(property_list_t::byteSize(sizeof(property_t), 1), 1); plist->entsizeAndFlags = sizeof(property_t); + plist->count = 1; } else { - plist = (property_list_t *) - realloc(plist, sizeof(property_list_t) - + plist->count * plist->entsize()); + plist->count++; + plist = (property_list_t *)realloc(plist, plist->byteSize()); } - property_t& prop = plist->get(plist->count++); + property_t& prop = plist->get(plist->count - 1); prop.name = strdupIfMutable(name); prop.attributes = copyPropertyAttributeString(attrs, count); } @@ -4918,24 +5069,6 @@ objc_copyRealizedClassList_nolock(unsigned int *outCount) return result; } -static void -class_getImpCache_nolock(Class cls, cache_t &cache, objc_imp_cache_entry *buffer, int len) -{ - bucket_t *buckets = cache.buckets(); - - uintptr_t count = cache.capacity(); - uintptr_t index; - int wpos = 0; - - for (index = 0; index < count && wpos < len; index += 1) { - if (buckets[index].sel()) { - buffer[wpos].imp = buckets[index].imp(cls); - buffer[wpos].sel = buckets[index].sel(); - wpos++; - } - } -} - /*********************************************************************** * objc_getClassList * Returns pointers to all classes. @@ -5015,7 +5148,7 @@ class_copyImpCache(Class cls, int *outCount) if (count) { buffer = (objc_imp_cache_entry *)calloc(1+count, sizeof(objc_imp_cache_entry)); - class_getImpCache_nolock(cls, cache, buffer, count); + cache.copyCacheNolock(buffer, count); } if (outCount) *outCount = count; @@ -5038,7 +5171,7 @@ objc_copyProtocolList(unsigned int *outCount) // Find all the protocols from the pre-optimized images. These protocols // won't be in the protocol map. 
objc::DenseMap preoptimizedProtocols; - if (sharedCacheSupportsProtocolRoots()) { + { header_info *hi; for (hi = FirstHeader; hi; hi = hi->getNext()) { if (!hi->hasPreoptimizedProtocols()) @@ -5242,9 +5375,9 @@ objc_class::getLoadMethod() mlist = ISA()->data()->ro()->baseMethods(); if (mlist) { for (const auto& meth : *mlist) { - const char *name = sel_cname(meth.name); + const char *name = sel_cname(meth.name()); if (0 == strcmp(name, "load")) { - return meth.imp; + return meth.imp(false); } } } @@ -5312,9 +5445,9 @@ _category_getLoadMethod(Category cat) mlist = cat->classMethods; if (mlist) { for (const auto& meth : *mlist) { - const char *name = sel_cname(meth.name); + const char *name = sel_cname(meth.name()); if (0 == strcmp(name, "load")) { - return meth.imp; + return meth.imp(false); } } } @@ -5461,6 +5594,32 @@ copyClassNamesForImage_nolock(header_info *hi, unsigned int *outCount) return names; } +Class * +copyClassesForImage_nolock(header_info *hi, unsigned int *outCount) +{ + runtimeLock.assertLocked(); + ASSERT(hi); + + size_t count; + classref_t const *classlist = _getObjc2ClassList(hi, &count); + Class *classes = (Class *) + malloc((count+1) * sizeof(Class)); + + size_t shift = 0; + for (size_t i = 0; i < count; i++) { + Class cls = remapClass(classlist[i]); + if (cls) { + classes[i-shift] = cls; + } else { + shift++; // ignored weak-linked class + } + } + count -= shift; + classes[count] = nil; + + if (outCount) *outCount = (unsigned int)count; + return classes; +} /*********************************************************************** @@ -5500,6 +5659,29 @@ objc_copyClassNamesForImage(const char *image, unsigned int *outCount) return copyClassNamesForImage_nolock(hi, outCount); } +Class * +objc_copyClassesForImage(const char *image, unsigned int *outCount) +{ + if (!image) { + if (outCount) *outCount = 0; + return nil; + } + + mutex_locker_t lock(runtimeLock); + + // Find the image. 
+ header_info *hi; + for (hi = FirstHeader; hi != nil; hi = hi->getNext()) { + if (0 == strcmp(image, hi->fname())) break; + } + + if (!hi) { + if (outCount) *outCount = 0; + return nil; + } + + return copyClassesForImage_nolock(hi, outCount); +} /*********************************************************************** * objc_copyClassNamesForImageHeader @@ -5568,7 +5750,7 @@ objc_class::nameForLogging() // Handle the easy case directly. if (isRealized() || isFuture()) { if (!isAnySwift()) { - return data()->ro()->name; + return data()->ro()->getName(); } auto rwe = data()->ext(); if (rwe && rwe->demangledName) { @@ -5578,11 +5760,15 @@ objc_class::nameForLogging() char *result; - const char *name = mangledName(); - char *de = copySwiftV1DemangledName(name); - if (de) result = de; - else result = strdup(name); - + if (isStubClass()) { + asprintf(&result, "", this); + } else if (const char *name = nonlazyMangledName()) { + char *de = copySwiftV1DemangledName(name); + if (de) result = de; + else result = strdup(name); + } else { + asprintf(&result, "", this); + } saveTemporaryString(result); return result; } @@ -5606,8 +5792,8 @@ objc_class::demangledName(bool needsLock) if (isRealized() || isFuture()) { // Swift metaclasses don't have the is-Swift bit. // We can't take this shortcut for them. 
- if (!isMetaClass() && !isAnySwift()) { - return data()->ro()->name; + if (isFuture() || (!isMetaClass() && !isAnySwift())) { + return data()->ro()->getName(); } auto rwe = data()->ext(); if (rwe && rwe->demangledName) { @@ -5735,30 +5921,32 @@ class_setVersion(Class cls, int version) /*********************************************************************** * search_method_list_inline **********************************************************************/ +template ALWAYS_INLINE static method_t * -findMethodInSortedMethodList(SEL key, const method_list_t *list) +findMethodInSortedMethodList(SEL key, const method_list_t *list, const getNameFunc &getName) { ASSERT(list); - const method_t * const first = &list->first; - const method_t *base = first; - const method_t *probe; + auto first = list->begin(); + auto base = first; + decltype(first) probe; + uintptr_t keyValue = (uintptr_t)key; uint32_t count; for (count = list->count; count != 0; count >>= 1) { probe = base + (count >> 1); - uintptr_t probeValue = (uintptr_t)probe->name; + uintptr_t probeValue = (uintptr_t)getName(probe); if (keyValue == probeValue) { // `probe` is a match. // Rewind looking for the *first* occurrence of this value. // This is required for correct category overrides. 
- while (probe > first && keyValue == (uintptr_t)probe[-1].name) { + while (probe > first && keyValue == (uintptr_t)getName((probe - 1))) { probe--; } - return (method_t *)probe; + return &*probe; } if (keyValue > probeValue) { @@ -5770,26 +5958,63 @@ findMethodInSortedMethodList(SEL key, const method_list_t *list) return nil; } +ALWAYS_INLINE static method_t * +findMethodInSortedMethodList(SEL key, const method_list_t *list) +{ + if (list->isSmallList()) { + if (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS && objc::inSharedCache((uintptr_t)list)) { + return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSEL(); }); + } else { + return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSELRef(); }); + } + } else { + return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.big().name; }); + } +} + +template +ALWAYS_INLINE static method_t * +findMethodInUnsortedMethodList(SEL sel, const method_list_t *list, const getNameFunc &getName) +{ + for (auto& meth : *list) { + if (getName(meth) == sel) return &meth; + } + return nil; +} + +ALWAYS_INLINE static method_t * +findMethodInUnsortedMethodList(SEL key, const method_list_t *list) +{ + if (list->isSmallList()) { + if (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS && objc::inSharedCache((uintptr_t)list)) { + return findMethodInUnsortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSEL(); }); + } else { + return findMethodInUnsortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSELRef(); }); + } + } else { + return findMethodInUnsortedMethodList(key, list, [](method_t &m) { return m.big().name; }); + } +} + ALWAYS_INLINE static method_t * search_method_list_inline(const method_list_t *mlist, SEL sel) { int methodListIsFixedUp = mlist->isFixedUp(); - int methodListHasExpectedSize = mlist->entsize() == sizeof(method_t); + int methodListHasExpectedSize = mlist->isExpectedSize(); if (fastpath(methodListIsFixedUp 
&& methodListHasExpectedSize)) { return findMethodInSortedMethodList(sel, mlist); } else { // Linear search of unsorted method list - for (auto& meth : *mlist) { - if (meth.name == sel) return &meth; - } + if (auto *m = findMethodInUnsortedMethodList(sel, mlist)) + return m; } #if DEBUG // sanity-check negative results if (mlist->isFixedUp()) { for (auto& meth : *mlist) { - if (meth.name == sel) { + if (meth.name() == sel) { _objc_fatal("linear search worked when binary search did not"); } } @@ -5808,14 +6033,15 @@ search_method_list(const method_list_t *mlist, SEL sel) /*********************************************************************** * method_lists_contains_any **********************************************************************/ +template static NEVER_INLINE bool -method_lists_contains_any(method_list_t * const *mlists, method_list_t * const *end, +method_lists_contains_any(T *mlists, T *end, SEL sels[], size_t selcount) { while (mlists < end) { const method_list_t *mlist = *mlists++; int methodListIsFixedUp = mlist->isFixedUp(); - int methodListHasExpectedSize = mlist->entsize() == sizeof(method_t); + int methodListHasExpectedSize = mlist->entsize() == sizeof(struct method_t::big); if (fastpath(methodListIsFixedUp && methodListHasExpectedSize)) { for (size_t i = 0; i < selcount; i++) { @@ -5824,11 +6050,9 @@ method_lists_contains_any(method_list_t * const *mlists, method_list_t * const * } } } else { - for (auto& meth : *mlist) { - for (size_t i = 0; i < selcount; i++) { - if (meth.name == sels[i]) { - return true; - } + for (size_t i = 0; i < selcount; i++) { + if (findMethodInUnsortedMethodList(sels[i], mlist)) { + return true; } } } @@ -5836,6 +6060,7 @@ method_lists_contains_any(method_list_t * const *mlists, method_list_t * const * return false; } + /*********************************************************************** * getMethodNoSuper_nolock * fixme @@ -5886,7 +6111,7 @@ getMethod_nolock(Class cls, SEL sel) ASSERT(cls->isRealized()); while 
(cls && ((m = getMethodNoSuper_nolock(cls, sel))) == nil) { - cls = cls->superclass; + cls = cls->getSuperclass(); } return m; @@ -5941,7 +6166,7 @@ static void resolveClassMethod(id inst, SEL sel, Class cls) ASSERT(cls->isRealized()); ASSERT(cls->isMetaClass()); - if (!lookUpImpOrNil(inst, @selector(resolveClassMethod:), cls)) { + if (!lookUpImpOrNilTryCache(inst, @selector(resolveClassMethod:), cls)) { // Resolver not implemented. return; } @@ -5961,7 +6186,7 @@ static void resolveClassMethod(id inst, SEL sel, Class cls) // Cache the result (good or bad) so the resolver doesn't fire next time. // +resolveClassMethod adds to self->ISA() a.k.a. cls - IMP imp = lookUpImpOrNil(inst, sel, cls); + IMP imp = lookUpImpOrNilTryCache(inst, sel, cls); if (resolved && PrintResolving) { if (imp) { @@ -5994,7 +6219,7 @@ static void resolveInstanceMethod(id inst, SEL sel, Class cls) ASSERT(cls->isRealized()); SEL resolve_sel = @selector(resolveInstanceMethod:); - if (!lookUpImpOrNil(cls, resolve_sel, cls->ISA())) { + if (!lookUpImpOrNilTryCache(cls, resolve_sel, cls->ISA(/*authenticated*/true))) { // Resolver not implemented. return; } @@ -6004,7 +6229,7 @@ static void resolveInstanceMethod(id inst, SEL sel, Class cls) // Cache the result (good or bad) so the resolver doesn't fire next time. // +resolveInstanceMethod adds to self a.k.a. 
cls - IMP imp = lookUpImpOrNil(inst, sel, cls); + IMP imp = lookUpImpOrNilTryCache(inst, sel, cls); if (resolved && PrintResolving) { if (imp) { @@ -6048,14 +6273,14 @@ resolveMethod_locked(id inst, SEL sel, Class cls, int behavior) // try [nonMetaClass resolveClassMethod:sel] // and [cls resolveInstanceMethod:sel] resolveClassMethod(inst, sel, cls); - if (!lookUpImpOrNil(inst, sel, cls)) { + if (!lookUpImpOrNilTryCache(inst, sel, cls)) { resolveInstanceMethod(inst, sel, cls); } } // chances are that calling the resolver have populated the cache // so attempt using it - return lookUpImpOrForward(inst, sel, cls, behavior | LOOKUP_CACHE); + return lookUpImpOrForwardTryCache(inst, sel, cls, behavior); } @@ -6077,22 +6302,94 @@ log_and_fill_cache(Class cls, IMP imp, SEL sel, id receiver, Class implementer) if (!cacheIt) return; } #endif - cache_fill(cls, sel, imp, receiver); + cls->cache.insert(sel, imp, receiver); } /*********************************************************************** -* lookUpImpOrForward. -* The standard IMP lookup. +* realizeAndInitializeIfNeeded_locked +* Realize the given class if not already realized, and initialize it if +* not already initialized. +* inst is an instance of cls or a subclass, or nil if none is known. +* cls is the class to initialize and realize. +* initializer is true to initialize the class, false to skip initialization. 
+**********************************************************************/ +static Class +realizeAndInitializeIfNeeded_locked(id inst, Class cls, bool initialize) +{ + runtimeLock.assertLocked(); + if (slowpath(!cls->isRealized())) { + cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock); + // runtimeLock may have been dropped but is now locked again + } + + if (slowpath(initialize && !cls->isInitialized())) { + cls = initializeAndLeaveLocked(cls, inst, runtimeLock); + // runtimeLock may have been dropped but is now locked again + + // If sel == initialize, class_initialize will send +initialize and + // then the messenger will send +initialize again after this + // procedure finishes. Of course, if this is not being called + // from the messenger then it won't happen. 2778172 + } + return cls; +} + +/*********************************************************************** +* lookUpImpOrForward / lookUpImpOrForwardTryCache / lookUpImpOrNilTryCache +* The standard IMP lookup. +* +* The TryCache variant attempts a fast-path lookup in the IMP Cache. +* Most callers should use lookUpImpOrForwardTryCache with LOOKUP_INITIALIZE +* * Without LOOKUP_INITIALIZE: tries to avoid +initialize (but sometimes fails) -* Without LOOKUP_CACHE: skips optimistic unlocked lookup (but uses cache elsewhere) -* Most callers should use LOOKUP_INITIALIZE and LOOKUP_CACHE -* inst is an instance of cls or a subclass thereof, or nil if none is known. +* With LOOKUP_NIL: returns nil on negative cache hits +* +* inst is an instance of cls or a subclass thereof, or nil if none is known. * If cls is an un-initialized metaclass then a non-nil inst is faster. * May return _objc_msgForward_impcache. IMPs destined for external use * must be converted to _objc_msgForward or _objc_msgForward_stret. * If you don't want forwarding at all, use LOOKUP_NIL. 
**********************************************************************/ +ALWAYS_INLINE +static IMP _lookUpImpTryCache(id inst, SEL sel, Class cls, int behavior) +{ + runtimeLock.assertUnlocked(); + + if (slowpath(!cls->isInitialized())) { + // see comment in lookUpImpOrForward + return lookUpImpOrForward(inst, sel, cls, behavior); + } + + IMP imp = cache_getImp(cls, sel); + if (imp != NULL) goto done; +#if CONFIG_USE_PREOPT_CACHES + if (fastpath(cls->cache.isConstantOptimizedCache(/* strict */true))) { + imp = cache_getImp(cls->cache.preoptFallbackClass(), sel); + } +#endif + if (slowpath(imp == NULL)) { + return lookUpImpOrForward(inst, sel, cls, behavior); + } + +done: + if ((behavior & LOOKUP_NIL) && imp == (IMP)_objc_msgForward_impcache) { + return nil; + } + return imp; +} + +IMP lookUpImpOrForwardTryCache(id inst, SEL sel, Class cls, int behavior) +{ + return _lookUpImpTryCache(inst, sel, cls, behavior); +} + +IMP lookUpImpOrNilTryCache(id inst, SEL sel, Class cls, int behavior) +{ + return _lookUpImpTryCache(inst, sel, cls, behavior | LOOKUP_NIL); +} + +NEVER_INLINE IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior) { const IMP forward_imp = (IMP)_objc_msgForward_impcache; @@ -6101,10 +6398,21 @@ IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior) runtimeLock.assertUnlocked(); - // Optimistic cache lookup - if (fastpath(behavior & LOOKUP_CACHE)) { - imp = cache_getImp(cls, sel); - if (imp) goto done_nolock; + if (slowpath(!cls->isInitialized())) { + // The first message sent to a class is often +new or +alloc, or +self + // which goes through objc_opt_* or various optimized entry points. + // + // However, the class isn't realized/initialized yet at this point, + // and the optimized entry points fall down through objc_msgSend, + // which ends up here. + // + // We really want to avoid caching these, as it can cause IMP caches + // to be made with a single entry forever. 
+ // + // Note that this check is racy as several threads might try to + // message a given class for the first time at the same time, + // in which case we might cache anyway. + behavior |= LOOKUP_NOCACHE; } // runtimeLock is held during isRealized and isInitialized checking @@ -6124,29 +6432,14 @@ IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior) // To make these harder we want to make sure this is a class that was // either built into the binary or legitimately registered through // objc_duplicateClass, objc_initializeClassPair or objc_allocateClassPair. - // - // TODO: this check is quite costly during process startup. checkIsKnownClass(cls); - if (slowpath(!cls->isRealized())) { - cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock); - // runtimeLock may have been dropped but is now locked again - } - - if (slowpath((behavior & LOOKUP_INITIALIZE) && !cls->isInitialized())) { - cls = initializeAndLeaveLocked(cls, inst, runtimeLock); - // runtimeLock may have been dropped but is now locked again - - // If sel == initialize, class_initialize will send +initialize and - // then the messenger will send +initialize again after this - // procedure finishes. Of course, if this is not being called - // from the messenger then it won't happen. 2778172 - } - + cls = realizeAndInitializeIfNeeded_locked(inst, cls, behavior & LOOKUP_INITIALIZE); + // runtimeLock may have been dropped but is now locked again runtimeLock.assertLocked(); curClass = cls; - // The code used to lookpu the class's cache again right after + // The code used to lookup the class's cache again right after // we take the lock but for the vast majority of the cases // evidence shows this is a miss most of the time, hence a time loss. // @@ -6154,18 +6447,26 @@ IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior) // kind of cache lookup is class_getInstanceMethod(). for (unsigned attempts = unreasonableClassCount();;) { - // curClass method list. 
- Method meth = getMethodNoSuper_nolock(curClass, sel); - if (meth) { - imp = meth->imp; - goto done; - } + if (curClass->cache.isConstantOptimizedCache(/* strict */true)) { +#if CONFIG_USE_PREOPT_CACHES + imp = cache_getImp(curClass, sel); + if (imp) goto done_unlock; + curClass = curClass->cache.preoptFallbackClass(); +#endif + } else { + // curClass method list. + Method meth = getMethodNoSuper_nolock(curClass, sel); + if (meth) { + imp = meth->imp(false); + goto done; + } - if (slowpath((curClass = curClass->superclass) == nil)) { - // No implementation found, and method resolver didn't help. - // Use forwarding. - imp = forward_imp; - break; + if (slowpath((curClass = curClass->getSuperclass()) == nil)) { + // No implementation found, and method resolver didn't help. + // Use forwarding. + imp = forward_imp; + break; + } } // Halt if there is a cycle in the superclass chain. @@ -6195,9 +6496,16 @@ IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior) } done: - log_and_fill_cache(cls, imp, sel, inst, curClass); + if (fastpath((behavior & LOOKUP_NOCACHE) == 0)) { +#if CONFIG_USE_PREOPT_CACHES + while (cls->cache.isConstantOptimizedCache(/* strict */true)) { + cls = cls->cache.preoptFallbackClass(); + } +#endif + log_and_fill_cache(cls, imp, sel, inst, curClass); + } + done_unlock: runtimeLock.unlock(); - done_nolock: if (slowpath((behavior & LOOKUP_NIL) && imp == forward_imp)) { return nil; } @@ -6211,7 +6519,6 @@ IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior) **********************************************************************/ IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel) { - Method meth; IMP imp; // fixme this is incomplete - no resolver, +initialize - @@ -6219,24 +6526,35 @@ IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel) ASSERT(sel == SEL_cxx_construct || sel == SEL_cxx_destruct); // Search cache first. 
- imp = cache_getImp(cls, sel); - if (imp) return imp; + // + // If the cache used for the lookup is preoptimized, + // we ask for `_objc_msgForward_impcache` to be returned on cache misses, + // so that there's no TOCTOU race between using `isConstantOptimizedCache` + // and calling cache_getImp() when not under the runtime lock. + // + // For dynamic caches, a miss will return `nil` + imp = cache_getImp(cls, sel, _objc_msgForward_impcache); - // Cache miss. Search method list. + if (slowpath(imp == nil)) { + // Cache miss. Search method list. - mutex_locker_t lock(runtimeLock); + mutex_locker_t lock(runtimeLock); - meth = getMethodNoSuper_nolock(cls, sel); + if (auto meth = getMethodNoSuper_nolock(cls, sel)) { + // Hit in method list. Cache it. + imp = meth->imp(false); + } else { + imp = _objc_msgForward_impcache; + } - if (meth) { - // Hit in method list. Cache it. - cache_fill(cls, sel, meth->imp, nil); - return meth->imp; - } else { - // Miss in method list. Cache objc_msgForward. 
- cache_fill(cls, sel, _objc_msgForward_impcache, nil); - return _objc_msgForward_impcache; + // Note, because we do not hold the runtime lock above + // isConstantOptimizedCache might flip, so we need to double check + if (!cls->cache.isConstantOptimizedCache(true /* strict */)) { + cls->cache.insert(sel, imp, nil); + } } + + return imp; } @@ -6255,7 +6573,7 @@ objc_property_t class_getProperty(Class cls, const char *name) ASSERT(cls->isRealized()); - for ( ; cls; cls = cls->superclass) { + for ( ; cls; cls = cls->getSuperclass()) { for (auto& prop : cls->data()->properties()) { if (0 == strcmp(name, prop.name)) { return (objc_property_t)∝ @@ -6313,6 +6631,15 @@ objc_class::setInitialized() objc::RRScanner::scanInitializedClass(cls, metacls); objc::CoreScanner::scanInitializedClass(cls, metacls); +#if CONFIG_USE_PREOPT_CACHES + cls->cache.maybeConvertToPreoptimized(); + metacls->cache.maybeConvertToPreoptimized(); +#endif + + if (PrintInitializing) { + _objc_inform("INITIALIZE: thread %p: setInitialized(%s)", + objc_thread_self(), cls->nameForLogging()); + } // Update the +initialize flags. // Do this last. metacls->changeInfo(RW_INITIALIZED, RW_INITIALIZING); @@ -6351,6 +6678,59 @@ void objc_class::setInstancesRequireRawIsaRecursively(bool inherited) }); } +#if CONFIG_USE_PREOPT_CACHES +void objc_class::setDisallowPreoptCachesRecursively(const char *why) +{ + Class cls = (Class)this; + runtimeLock.assertLocked(); + + if (!allowsPreoptCaches()) return; + + foreach_realized_class_and_subclass(cls, [=](Class c){ + if (!c->allowsPreoptCaches()) { + return false; + } + + if (c->cache.isConstantOptimizedCache(/* strict */true)) { + c->cache.eraseNolock(why); + } else { + if (PrintCaches) { + _objc_inform("CACHES: %sclass %s: disallow preopt cache (from %s)", + isMetaClass() ? 
"meta" : "", + nameForLogging(), why); + } + c->setDisallowPreoptCaches(); + } + return true; + }); +} + +void objc_class::setDisallowPreoptInlinedSelsRecursively(const char *why) +{ + Class cls = (Class)this; + runtimeLock.assertLocked(); + + if (!allowsPreoptInlinedSels()) return; + + foreach_realized_class_and_subclass(cls, [=](Class c){ + if (!c->allowsPreoptInlinedSels()) { + return false; + } + + if (PrintCaches) { + _objc_inform("CACHES: %sclass %s: disallow sel-inlined preopt cache (from %s)", + isMetaClass() ? "meta" : "", + nameForLogging(), why); + } + + c->setDisallowPreoptInlinedSels(); + if (c->cache.isConstantOptimizedCacheWithInlinedSels()) { + c->cache.eraseNolock(why); + } + return true; + }); +} +#endif /*********************************************************************** * Choose a class index. @@ -6376,6 +6756,62 @@ void objc_class::chooseClassArrayIndex() #endif } +static const char *empty_lazyClassNamer(Class cls __unused) { + return nullptr; +} + +static ChainedHookFunction LazyClassNamerHook{empty_lazyClassNamer}; + +void objc_setHook_lazyClassNamer(_Nonnull objc_hook_lazyClassNamer newValue, + _Nonnull objc_hook_lazyClassNamer * _Nonnull oldOutValue) { + LazyClassNamerHook.set(newValue, oldOutValue); +} + +const char * objc_class::installMangledNameForLazilyNamedClass() { + auto lazyClassNamer = LazyClassNamerHook.get(); + if (!*lazyClassNamer) { + _objc_fatal("Lazily named class %p with no lazy name handler registered", this); + } + + // If this is called on a metaclass, extract the original class + // and make it do the installation instead. It will install + // the metaclass's name too. 
+ if (isMetaClass()) { + Class nonMeta = bits.safe_ro()->getNonMetaclass(); + return nonMeta->installMangledNameForLazilyNamedClass(); + } + + Class cls = (Class)this; + Class metaclass = ISA(); + + const char *name = lazyClassNamer((Class)this); + if (!name) { + _objc_fatal("Lazily named class %p wasn't named by lazy name handler", this); + } + + // Emplace the name into the class_ro_t. If we lose the race, + // then we'll free our name and use whatever got placed there + // instead of our name. + const char *previously = NULL; + class_ro_t *ro = (class_ro_t *)cls->bits.safe_ro(); + bool wonRace = ro->name.compare_exchange_strong(previously, name, std::memory_order_release, std::memory_order_acquire); + if (!wonRace) { + free((void *)name); + name = previously; + } + + // Emplace whatever name won the race in the metaclass too. + class_ro_t *metaRO = (class_ro_t *)metaclass->bits.safe_ro(); + + // Write our pointer if the current value is NULL. There's no + // need to loop or check success, since the only way this can + // fail is if another thread succeeded in writing the exact + // same pointer. 
+ const char *expected = NULL; + metaRO->name.compare_exchange_strong(expected, name, std::memory_order_release, std::memory_order_acquire); + + return name; +} /*********************************************************************** * Update custom RR and AWZ when a method changes its IMP @@ -6398,7 +6834,7 @@ adjustCustomFlagsForMethodChange(Class cls, method_t *meth) const uint8_t * class_getIvarLayout(Class cls) { - if (cls) return cls->data()->ro()->ivarLayout; + if (cls) return cls->data()->ro()->getIvarLayout(); else return nil; } @@ -6431,6 +6867,8 @@ class_setIvarLayout(Class cls, const uint8_t *layout) { if (!cls) return; + ASSERT(!cls->isMetaClass()); + mutex_locker_t lock(runtimeLock); checkIsKnownClass(cls); @@ -6446,7 +6884,7 @@ class_setIvarLayout(Class cls, const uint8_t *layout) class_ro_t *ro_w = make_ro_writeable(cls->data()); - try_free(ro_w->ivarLayout); + try_free(ro_w->getIvarLayout()); ro_w->ivarLayout = ustrdupMaybeNil(layout); } @@ -6520,7 +6958,7 @@ Class _class_getClassForIvar(Class cls, Ivar ivar) { mutex_locker_t lock(runtimeLock); - for ( ; cls; cls = cls->superclass) { + for ( ; cls; cls = cls->getSuperclass()) { if (auto ivars = cls->data()->ro()->ivars) { if (ivars->containsIvar(ivar)) { return cls; @@ -6542,7 +6980,7 @@ _class_getVariable(Class cls, const char *name) { mutex_locker_t lock(runtimeLock); - for ( ; cls; cls = cls->superclass) { + for ( ; cls; cls = cls->getSuperclass()) { ivar_t *ivar = getIvar(cls, name); if (ivar) { return ivar; @@ -6581,6 +7019,29 @@ BOOL class_conformsToProtocol(Class cls, Protocol *proto_gen) return NO; } +static void +addMethods_finish(Class cls, method_list_t *newlist) +{ + auto rwe = cls->data()->extAllocIfNeeded(); + + if (newlist->count > 1) { + method_t::SortBySELAddress sorter; + std::stable_sort(&newlist->begin()->big(), &newlist->end()->big(), sorter); + } + + prepareMethodLists(cls, &newlist, 1, NO, NO, __func__); + rwe->methods.attachLists(&newlist, 1); + + // If the class being 
modified has a constant cache, + // then all children classes are flattened constant caches + // and need to be flushed as well. + flushCaches(cls, __func__, [](Class c){ + // constant caches have been dealt with in prepareMethodLists + // if the class still is constant here, it's fine to keep + return !c->cache.isConstantOptimizedCache(); + }); +} + /********************************************************************** * addMethod @@ -6603,27 +7064,23 @@ addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace) if ((m = getMethodNoSuper_nolock(cls, name))) { // already exists if (!replace) { - result = m->imp; + result = m->imp(false); } else { result = _method_setImplementation(cls, m, imp); } } else { - auto rwe = cls->data()->extAllocIfNeeded(); - // fixme optimize method_list_t *newlist; - newlist = (method_list_t *)calloc(sizeof(*newlist), 1); + newlist = (method_list_t *)calloc(method_list_t::byteSize(method_t::bigSize, 1), 1); newlist->entsizeAndFlags = - (uint32_t)sizeof(method_t) | fixed_up_method_list; + (uint32_t)sizeof(struct method_t::big) | fixed_up_method_list; newlist->count = 1; - newlist->first.name = name; - newlist->first.types = strdupIfMutable(types); - newlist->first.imp = imp; - - prepareMethodLists(cls, &newlist, 1, NO, NO); - rwe->methods.attachLists(&newlist, 1); - flushCaches(cls); + auto &first = newlist->begin()->big(); + first.name = name; + first.types = strdupIfMutable(types); + first.imp = imp; + addMethods_finish(cls, newlist); result = nil; } @@ -6650,14 +7107,12 @@ addMethods(Class cls, const SEL *names, const IMP *imps, const char **types, ASSERT(cls->isRealized()); method_list_t *newlist; - size_t newlistSize = method_list_t::byteSize(sizeof(method_t), count); + size_t newlistSize = method_list_t::byteSize(sizeof(struct method_t::big), count); newlist = (method_list_t *)calloc(newlistSize, 1); newlist->entsizeAndFlags = - (uint32_t)sizeof(method_t) | fixed_up_method_list; + (uint32_t)sizeof(struct 
method_t::big) | fixed_up_method_list; newlist->count = 0; - method_t *newlistMethods = &newlist->first; - SEL *failedNames = nil; uint32_t failedCount = 0; @@ -6673,32 +7128,24 @@ addMethods(Class cls, const SEL *names, const IMP *imps, const char **types, failedNames = (SEL *)calloc(sizeof(*failedNames), count + 1); } - failedNames[failedCount] = m->name; + failedNames[failedCount] = m->name(); failedCount++; } else { _method_setImplementation(cls, m, imps[i]); } } else { - method_t *newmethod = &newlistMethods[newlist->count]; - newmethod->name = names[i]; - newmethod->types = strdupIfMutable(types[i]); - newmethod->imp = imps[i]; + auto &newmethod = newlist->end()->big(); + newmethod.name = names[i]; + newmethod.types = strdupIfMutable(types[i]); + newmethod.imp = imps[i]; newlist->count++; } } if (newlist->count > 0) { - auto rwe = cls->data()->extAllocIfNeeded(); - // fixme resize newlist because it may have been over-allocated above. // Note that realloc() alone doesn't work due to ptrauth. - - method_t::SortBySELAddress sorter; - std::stable_sort(newlist->begin(), newlist->end(), sorter); - - prepareMethodLists(cls, &newlist, 1, NO, NO); - rwe->methods.attachLists(&newlist, 1); - flushCaches(cls); + addMethods_finish(cls, newlist); } else { // Attaching the method list to the class consumes it. If we don't // do that, we have to free the memory ourselves. 
@@ -6803,7 +7250,7 @@ class_addIvar(Class cls, const char *name, size_t size, memcpy(newlist, oldlist, oldsize); free(oldlist); } else { - newlist = (ivar_list_t *)calloc(sizeof(ivar_list_t), 1); + newlist = (ivar_list_t *)calloc(ivar_list_t::byteSize(sizeof(ivar_t), 1), 1); newlist->entsizeAndFlags = (uint32_t)sizeof(ivar_t); } @@ -6897,11 +7344,11 @@ _class_addProperty(Class cls, const char *name, ASSERT(cls->isRealized()); property_list_t *proplist = (property_list_t *) - malloc(sizeof(*proplist)); + malloc(property_list_t::byteSize(sizeof(property_t), 1)); proplist->count = 1; - proplist->entsizeAndFlags = sizeof(proplist->first); - proplist->first.name = strdupIfMutable(name); - proplist->first.attributes = copyPropertyAttributeString(attrs, count); + proplist->entsizeAndFlags = sizeof(property_t); + proplist->begin()->name = strdupIfMutable(name); + proplist->begin()->attributes = copyPropertyAttributeString(attrs, count); rwe->properties.attachLists(&proplist, 1); @@ -7034,7 +7481,7 @@ objc_duplicateClass(Class original, const char *name, duplicate = alloc_class_for_subclass(original, extraBytes); duplicate->initClassIsa(original->ISA()); - duplicate->superclass = original->superclass; + duplicate->setSuperclass(original->getSuperclass()); duplicate->cache.initializeToEmpty(); @@ -7053,7 +7500,7 @@ objc_duplicateClass(Class original, const char *name, if (orig_rwe) { auto rwe = rw->extAllocIfNeeded(); rwe->version = orig_rwe->version; - rwe->methods = orig_rwe->methods.duplicate(); + orig_rwe->methods.duplicateInto(rwe->methods); // fixme dies when categories are added to the base rwe->properties = orig_rwe->properties; @@ -7066,8 +7513,8 @@ objc_duplicateClass(Class original, const char *name, duplicate->chooseClassArrayIndex(); - if (duplicate->superclass) { - addSubclass(duplicate->superclass, duplicate); + if (duplicate->getSuperclass()) { + addSubclass(duplicate->getSuperclass(), duplicate); // duplicate->isa == original->isa so don't addSubclass() for 
it } else { addRootClass(duplicate); @@ -7075,7 +7522,7 @@ objc_duplicateClass(Class original, const char *name, // Don't methodize class - construction above is correct - addNamedClass(duplicate, ro->name); + addNamedClass(duplicate, ro->getName()); addClassTableEntry(duplicate, /*addMeta=*/false); if (PrintConnecting) { @@ -7136,8 +7583,8 @@ static void objc_initializeClassPair_internal(Class superclass, const char *name meta->setInstanceSize(meta_ro_w->instanceStart); } - cls_ro_w->name = strdupIfMutable(name); - meta_ro_w->name = strdupIfMutable(name); + cls_ro_w->name.store(strdupIfMutable(name), std::memory_order_release); + meta_ro_w->name.store(strdupIfMutable(name), std::memory_order_release); cls_ro_w->ivarLayout = &UnsetLayout; cls_ro_w->weakIvarLayout = &UnsetLayout; @@ -7160,14 +7607,14 @@ static void objc_initializeClassPair_internal(Class superclass, const char *name if (superclass) { meta->initClassIsa(superclass->ISA()->ISA()); - cls->superclass = superclass; - meta->superclass = superclass->ISA(); + cls->setSuperclass(superclass); + meta->setSuperclass(superclass->ISA()); addSubclass(superclass, cls); addSubclass(superclass->ISA(), meta); } else { meta->initClassIsa(meta); - cls->superclass = Nil; - meta->superclass = cls; + cls->setSuperclass(Nil); + meta->setSuperclass(cls); addRootClass(cls); addSubclass(cls, meta); } @@ -7274,7 +7721,7 @@ void objc_registerClassPair(Class cls) (cls->ISA()->data()->flags & RW_CONSTRUCTED)) { _objc_inform("objc_registerClassPair: class '%s' was already " - "registered!", cls->data()->ro()->name); + "registered!", cls->data()->ro()->getName()); return; } @@ -7283,7 +7730,7 @@ void objc_registerClassPair(Class cls) { _objc_inform("objc_registerClassPair: class '%s' was not " "allocated with objc_allocateClassPair!", - cls->data()->ro()->name); + cls->data()->ro()->getName()); return; } @@ -7292,7 +7739,7 @@ void objc_registerClassPair(Class cls) cls->changeInfo(RW_CONSTRUCTED, RW_CONSTRUCTING | RW_REALIZING); // 
Add to named class table. - addNamedClass(cls, cls->data()->ro()->name); + addNamedClass(cls, cls->data()->ro()->getName()); } @@ -7315,7 +7762,7 @@ Class objc_readClassPair(Class bits, const struct objc_image_info *info) // Fail if the superclass isn't kosher. bool rootOK = bits->data()->flags & RO_ROOT; - if (!verifySuperclass(bits->superclass, rootOK)){ + if (!verifySuperclass(bits->getSuperclass(), rootOK)){ return nil; } @@ -7354,7 +7801,7 @@ static void detach_class(Class cls, bool isMeta) // superclass's subclass list if (cls->isRealized()) { - Class supercls = cls->superclass; + Class supercls = cls->getSuperclass(); if (supercls) { removeSubclass(supercls, cls); } else { @@ -7386,11 +7833,11 @@ static void free_class(Class cls) auto rwe = rw->ext(); auto ro = rw->ro(); - cache_delete(cls); + cls->cache.destroy(); if (rwe) { for (auto& meth : rwe->methods) { - try_free(meth.types); + try_free(meth.types()); } rwe->methods.tryFree(); } @@ -7415,9 +7862,9 @@ static void free_class(Class cls) rwe->protocols.tryFree(); } - try_free(ro->ivarLayout); + try_free(ro->getIvarLayout()); try_free(ro->weakIvarLayout); - try_free(ro->name); + try_free(ro->getName()); try_free(ro); objc::zfree(rwe); objc::zfree(rw); @@ -7438,25 +7885,25 @@ void objc_disposeClassPair(Class cls) // disposing still-unregistered class is OK! _objc_inform("objc_disposeClassPair: class '%s' was not " "allocated with objc_allocateClassPair!", - cls->data()->ro()->name); + cls->data()->ro()->getName()); return; } if (cls->isMetaClass()) { _objc_inform("objc_disposeClassPair: class '%s' is a metaclass, " - "not a class!", cls->data()->ro()->name); + "not a class!", cls->data()->ro()->getName()); return; } // Shouldn't have any live subclasses. 
if (cls->data()->firstSubclass) { _objc_inform("objc_disposeClassPair: class '%s' still has subclasses, " - "including '%s'!", cls->data()->ro()->name, + "including '%s'!", cls->data()->ro()->getName(), cls->data()->firstSubclass->nameForLogging()); } if (cls->ISA()->data()->firstSubclass) { _objc_inform("objc_disposeClassPair: class '%s' still has subclasses, " - "including '%s'!", cls->data()->ro()->name, + "including '%s'!", cls->data()->ro()->getName(), cls->ISA()->data()->firstSubclass->nameForLogging()); } @@ -7599,12 +8046,11 @@ class_createInstances(Class cls, size_t extraBytes, static id _object_copyFromZone(id oldObj, size_t extraBytes, void *zone) { - if (!oldObj) return nil; - if (oldObj->isTaggedPointer()) return oldObj; + if (oldObj->isTaggedPointerOrNil()) return oldObj; // fixme this doesn't handle C++ ivars correctly (#4619414) - Class cls = oldObj->ISA(); + Class cls = oldObj->ISA(/*authenticated*/true); size_t size; id obj = _class_createInstanceFromZone(cls, extraBytes, zone, OBJECT_CONSTRUCT_NONE, false, &size); @@ -7679,7 +8125,7 @@ void *objc_destructInstance(id obj) // This order is important. 
if (cxx) object_cxxDestruct(obj); - if (assoc) _object_remove_assocations(obj); + if (assoc) _object_remove_assocations(obj, /*deallocating*/true); obj->clearDeallocating(); } @@ -7764,6 +8210,8 @@ unsigned objc_debug_taggedpointer_ext_payload_lshift = 0; unsigned objc_debug_taggedpointer_ext_payload_rshift = 0; Class objc_debug_taggedpointer_ext_classes[1] = { nil }; +uintptr_t objc_debug_constant_cfstring_tag_bits = 0; + static void disableTaggedPointers() { } @@ -7791,6 +8239,13 @@ unsigned objc_debug_taggedpointer_ext_payload_lshift = _OBJC_TAG_EXT_PAYLOAD_LS unsigned objc_debug_taggedpointer_ext_payload_rshift = _OBJC_TAG_EXT_PAYLOAD_RSHIFT; // objc_debug_taggedpointer_ext_classes is defined in objc-msg-*.s +#if OBJC_SPLIT_TAGGED_POINTERS +uint8_t objc_debug_tag60_permutations[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; +uintptr_t objc_debug_constant_cfstring_tag_bits = _OBJC_TAG_EXT_MASK | ((uintptr_t)(OBJC_TAG_Constant_CFString - OBJC_TAG_First52BitPayload) << _OBJC_TAG_EXT_SLOT_SHIFT); +#else +uintptr_t objc_debug_constant_cfstring_tag_bits = 0; +#endif + static void disableTaggedPointers() { @@ -7813,15 +8268,21 @@ disableTaggedPointers() static Class * classSlotForBasicTagIndex(objc_tag_index_t tag) { +#if OBJC_SPLIT_TAGGED_POINTERS + uintptr_t obfuscatedTag = _objc_basicTagToObfuscatedTag(tag); + return &objc_tag_classes[obfuscatedTag]; +#else uintptr_t tagObfuscator = ((objc_debug_taggedpointer_obfuscator >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK); uintptr_t obfuscatedTag = tag ^ tagObfuscator; + // Array index in objc_tag_classes includes the tagged bit itself -#if SUPPORT_MSB_TAGGED_POINTERS +# if SUPPORT_MSB_TAGGED_POINTERS return &objc_tag_classes[0x8 | obfuscatedTag]; -#else +# else return &objc_tag_classes[(obfuscatedTag << 1) | 1]; +# endif #endif } @@ -7837,6 +8298,10 @@ classSlotForTagIndex(objc_tag_index_t tag) if (tag >= OBJC_TAG_First52BitPayload && tag <= OBJC_TAG_Last52BitPayload) { int index = tag - OBJC_TAG_First52BitPayload; +#if 
OBJC_SPLIT_TAGGED_POINTERS + if (tag >= OBJC_TAG_FirstUnobfuscatedSplitTag) + return &objc_tag_ext_classes[index]; +#endif uintptr_t tagObfuscator = ((objc_debug_taggedpointer_obfuscator >> _OBJC_TAG_EXT_INDEX_SHIFT) & _OBJC_TAG_EXT_INDEX_MASK); @@ -7860,16 +8325,28 @@ classSlotForTagIndex(objc_tag_index_t tag) static void initializeTaggedPointerObfuscator(void) { - if (sdkIsOlderThan(10_14, 12_0, 12_0, 5_0, 3_0) || - // Set the obfuscator to zero for apps linked against older SDKs, - // in case they're relying on the tagged pointer representation. - DisableTaggedPointerObfuscation) { - objc_debug_taggedpointer_obfuscator = 0; - } else { + if (!DisableTaggedPointerObfuscation && dyld_program_sdk_at_least(dyld_fall_2018_os_versions)) { // Pull random data into the variable, then shift away all non-payload bits. arc4random_buf(&objc_debug_taggedpointer_obfuscator, sizeof(objc_debug_taggedpointer_obfuscator)); objc_debug_taggedpointer_obfuscator &= ~_OBJC_TAG_MASK; + +#if OBJC_SPLIT_TAGGED_POINTERS + // The obfuscator doesn't apply to any of the extended tag mask or the no-obfuscation bit. + objc_debug_taggedpointer_obfuscator &= ~(_OBJC_TAG_EXT_MASK | _OBJC_TAG_NO_OBFUSCATION_MASK); + + // Shuffle the first seven entries of the tag permutator. + int max = 7; + for (int i = max - 1; i >= 0; i--) { + int target = arc4random_uniform(i + 1); + swap(objc_debug_tag60_permutations[i], + objc_debug_tag60_permutations[target]); + } +#endif + } else { + // Set the obfuscator to zero for apps linked against older SDKs, + // in case they're relying on the tagged pointer representation. 
+ objc_debug_taggedpointer_obfuscator = 0; } } @@ -8017,19 +8494,19 @@ static Class setSuperclass(Class cls, Class newSuper) ASSERT(cls->isRealized()); ASSERT(newSuper->isRealized()); - oldSuper = cls->superclass; + oldSuper = cls->getSuperclass(); removeSubclass(oldSuper, cls); removeSubclass(oldSuper->ISA(), cls->ISA()); - cls->superclass = newSuper; - cls->ISA()->superclass = newSuper->ISA(); + cls->setSuperclass(newSuper); + cls->ISA()->setSuperclass(newSuper->ISA(/*authenticated*/true)); addSubclass(newSuper, cls); addSubclass(newSuper->ISA(), cls->ISA()); // Flush subclass's method caches. - flushCaches(cls); - flushCaches(cls->ISA()); - + flushCaches(cls, __func__, [](Class c){ return true; }); + flushCaches(cls->ISA(), __func__, [](Class c){ return true; }); + return oldSuper; } diff --git a/runtime/objc-runtime.mm b/runtime/objc-runtime.mm index 08a1b77..e38b274 100644 --- a/runtime/objc-runtime.mm +++ b/runtime/objc-runtime.mm @@ -33,6 +33,7 @@ * Imports. **********************************************************************/ +#include // os_feature_enabled_simple() #include "objc-private.h" #include "objc-loadmethod.h" #include "objc-file.h" @@ -87,6 +88,9 @@ const option_t Settings[] = { #undef OPTION }; +namespace objc { + int PageCountWarning = 50; // Default value if the environment variable is not set +} // objc's key for pthread_getspecific #if SUPPORT_DIRECT_THREAD_KEYS @@ -338,6 +342,22 @@ void removeHeader(header_info *hi) #endif } +/*********************************************************************** +* SetPageCountWarning +* Convert environment variable value to integer value. +* If the value is valid, set the global PageCountWarning value. 
+**********************************************************************/ +void SetPageCountWarning(const char* envvar) { + if (envvar) { + long result = strtol(envvar, NULL, 10); + if (result <= INT_MAX && result >= -1) { + int32_t var = (int32_t)result; + if (var != 0) { // 0 is not a valid value for the env var + objc::PageCountWarning = var; + } + } + } +} /*********************************************************************** * environ_init @@ -352,6 +372,13 @@ void environ_init(void) return; } + // Turn off autorelease LRU coalescing by default for apps linked against + // older SDKs. LRU coalescing can reorder releases and certain older apps + // are accidentally relying on the ordering. + // rdar://problem/63886091 + if (!dyld_program_sdk_at_least(dyld_fall_2020_os_versions)) + DisableAutoreleaseCoalescingLRU = true; + bool PrintHelp = false; bool PrintOptions = false; bool maybeMallocDebugging = false; @@ -376,6 +403,11 @@ void environ_init(void) continue; } + if (0 == strncmp(*p, "OBJC_DEBUG_POOL_DEPTH=", 22)) { + SetPageCountWarning(*p + 22); + continue; + } + const char *value = strchr(*p, '='); if (!*value) continue; value++; @@ -388,10 +420,10 @@ void environ_init(void) *opt->var = (0 == strcmp(value, "YES")); break; } - } + } } - // Special case: enable some autorelease pool debugging + // Special case: enable some autorelease pool debugging // when some malloc debugging is enabled // and OBJC_DEBUG_POOL_ALLOCATION is not set to something other than NO. if (maybeMallocDebugging) { @@ -409,6 +441,10 @@ void environ_init(void) } } + if (!os_feature_enabled_simple(objc4, preoptimizedCaches, true)) { + DisablePreoptCaches = true; + } + // Print OBJC_HELP and OBJC_PRINT_OPTIONS output. 
if (PrintHelp || PrintOptions) { if (PrintHelp) { @@ -649,31 +685,25 @@ objc_getAssociatedObject(id object, const void *key) return _object_get_associative_reference(object, key); } -static void -_base_objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy) -{ - _object_set_associative_reference(object, key, value, policy); -} - -static ChainedHookFunction SetAssocHook{_base_objc_setAssociatedObject}; +typedef void (*objc_hook_setAssociatedObject)(id _Nonnull object, const void * _Nonnull key, + id _Nullable value, objc_AssociationPolicy policy); void objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue, objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue) { - SetAssocHook.set(newValue, outOldValue); + // See objc_object::setHasAssociatedObjects() for a replacement } void objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy) { - SetAssocHook.get()(object, key, value, policy); + _object_set_associative_reference(object, key, value, policy); } - void objc_removeAssociatedObjects(id object) { if (object && object->hasAssociatedObjects()) { - _object_remove_assocations(object); + _object_remove_assocations(object, /*deallocating*/false); } } diff --git a/runtime/objc-sel-old.mm b/runtime/objc-sel-old.mm index 2a3a242..02fc2b5 100644 --- a/runtime/objc-sel-old.mm +++ b/runtime/objc-sel-old.mm @@ -33,11 +33,6 @@ #include "objc-private.h" #include "objc-sel-set.h" -#if SUPPORT_PREOPT -#include -static const objc_selopt_t *builtins = NULL; -#endif - __BEGIN_DECLS static size_t SelrefCount = 0; @@ -55,10 +50,6 @@ static SEL _objc_search_builtins(const char *key) if (!key) return (SEL)0; if ('\0' == *key) return (SEL)_objc_empty_selector; -#if SUPPORT_PREOPT - if (builtins) return (SEL)builtins->get(key); -#endif - return (SEL)0; } @@ -151,10 +142,6 @@ void sel_init(size_t selrefCount) // save this value for later SelrefCount = selrefCount; -#if SUPPORT_PREOPT 
- builtins = preoptimizedSelectors(); -#endif - // Register selectors used by libobjc mutex_locker_t lock(selLock); diff --git a/runtime/objc-sel-set.mm b/runtime/objc-sel-set.mm index ab21b00..0fcf6f6 100644 --- a/runtime/objc-sel-set.mm +++ b/runtime/objc-sel-set.mm @@ -120,7 +120,7 @@ struct __objc_sel_set *__objc_sel_set_create(size_t selrefs) { sset->_count = 0; // heuristic to convert executable's selrefs count to table size -#if TARGET_OS_IPHONE && !TARGET_OS_IOSMAC +#if TARGET_OS_IPHONE && !TARGET_OS_MACCATALYST for (idx = 0; __objc_sel_set_capacities[idx] < selrefs; idx++); if (idx > 0 && selrefs < 1536) idx--; #else diff --git a/runtime/objc-sel-table.S b/runtime/objc-sel-table.S index 6d9710d..3fb517a 100644 --- a/runtime/objc-sel-table.S +++ b/runtime/objc-sel-table.S @@ -2,7 +2,12 @@ #include #if __LP64__ +#if __arm64e__ +// 0x6AE1 +# define PTR(x) .quad x@AUTH(da, 27361, addr) +#else # define PTR(x) .quad x +#endif #else # define PTR(x) .long x #endif diff --git a/runtime/objc-sel.mm b/runtime/objc-sel.mm index 27ee356..a8623d8 100644 --- a/runtime/objc-sel.mm +++ b/runtime/objc-sel.mm @@ -24,15 +24,8 @@ #if __OBJC2__ #include "objc-private.h" -#include "objc-cache.h" #include "DenseMapExtras.h" -#if SUPPORT_PREOPT -static const objc_selopt_t *builtins = NULL; -static bool useDyldSelectorLookup = false; -#endif - - static objc::ExplicitInitDenseSet namedSelectors; static SEL search_builtins(const char *key); @@ -44,32 +37,13 @@ static SEL search_builtins(const char *key); void sel_init(size_t selrefCount) { #if SUPPORT_PREOPT - // If dyld finds a known shared cache selector, then it must be also looking - // in the shared cache table. 
- if (_dyld_get_objc_selector("retain") != nil) - useDyldSelectorLookup = true; - else - builtins = preoptimizedSelectors(); - - if (PrintPreopt && useDyldSelectorLookup) { + if (PrintPreopt) { _objc_inform("PREOPTIMIZATION: using dyld selector opt"); } - - if (PrintPreopt && builtins) { - uint32_t occupied = builtins->occupied; - uint32_t capacity = builtins->capacity; - - _objc_inform("PREOPTIMIZATION: using selopt at %p", builtins); - _objc_inform("PREOPTIMIZATION: %u selectors", occupied); - _objc_inform("PREOPTIMIZATION: %u/%u (%u%%) hash table occupancy", - occupied, capacity, - (unsigned)(occupied/(double)capacity*100)); - } - namedSelectors.init(useDyldSelectorLookup ? 0 : (unsigned)selrefCount); -#else - namedSelectors.init((unsigned)selrefCount); #endif + namedSelectors.init((unsigned)selrefCount); + // Register selectors used by libobjc mutex_locker_t lock(selLock); @@ -93,6 +67,16 @@ const char *sel_getName(SEL sel) } +unsigned long sel_hash(SEL sel) +{ + unsigned long selAddr = (unsigned long)sel; +#if CONFIG_USE_PREOPT_CACHES + selAddr ^= (selAddr >> 7); +#endif + return selAddr; +} + + BOOL sel_isMapped(SEL sel) { if (!sel) return NO; @@ -110,17 +94,8 @@ BOOL sel_isMapped(SEL sel) static SEL search_builtins(const char *name) { #if SUPPORT_PREOPT - if (builtins) { - SEL result = 0; - if ((result = (SEL)builtins->get(name))) - return result; - - if ((result = (SEL)_dyld_get_objc_selector(name))) - return result; - } else if (useDyldSelectorLookup) { - if (SEL result = (SEL)_dyld_get_objc_selector(name)) - return result; - } + if (SEL result = (SEL)_dyld_get_objc_selector(name)) + return result; #endif return nil; } diff --git a/runtime/objc-weak.h b/runtime/objc-weak.h index 8c50050..535fc88 100644 --- a/runtime/objc-weak.h +++ b/runtime/objc-weak.h @@ -123,9 +123,15 @@ struct weak_table_t { uintptr_t max_hash_displacement; }; +enum WeakRegisterDeallocatingOptions { + ReturnNilIfDeallocating, + CrashIfDeallocating, + DontCheckDeallocating +}; + /// 
Adds an (object, weak pointer) pair to the weak table. id weak_register_no_lock(weak_table_t *weak_table, id referent, - id *referrer, bool crashIfDeallocating); + id *referrer, WeakRegisterDeallocatingOptions deallocatingOptions); /// Removes an (object, weak pointer) pair from the weak table. void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer); diff --git a/runtime/objc-weak.mm b/runtime/objc-weak.mm index 4d9c43d..3289953 100644 --- a/runtime/objc-weak.mm +++ b/runtime/objc-weak.mm @@ -389,38 +389,43 @@ weak_unregister_no_lock(weak_table_t *weak_table, id referent_id, */ id weak_register_no_lock(weak_table_t *weak_table, id referent_id, - id *referrer_id, bool crashIfDeallocating) + id *referrer_id, WeakRegisterDeallocatingOptions deallocatingOptions) { objc_object *referent = (objc_object *)referent_id; objc_object **referrer = (objc_object **)referrer_id; - if (!referent || referent->isTaggedPointer()) return referent_id; + if (referent->isTaggedPointerOrNil()) return referent_id; // ensure that the referenced object is viable - bool deallocating; - if (!referent->ISA()->hasCustomRR()) { - deallocating = referent->rootIsDeallocating(); - } - else { - BOOL (*allowsWeakReference)(objc_object *, SEL) = - (BOOL(*)(objc_object *, SEL)) - object_getMethodImplementation((id)referent, - @selector(allowsWeakReference)); - if ((IMP)allowsWeakReference == _objc_msgForward) { - return nil; + if (deallocatingOptions == ReturnNilIfDeallocating || + deallocatingOptions == CrashIfDeallocating) { + bool deallocating; + if (!referent->ISA()->hasCustomRR()) { + deallocating = referent->rootIsDeallocating(); } - deallocating = + else { + // Use lookUpImpOrForward so we can avoid the assert in + // class_getInstanceMethod, since we intentionally make this + // callout with the lock held. 
+ auto allowsWeakReference = (BOOL(*)(objc_object *, SEL)) + lookUpImpOrForwardTryCache((id)referent, @selector(allowsWeakReference), + referent->getIsa()); + if ((IMP)allowsWeakReference == _objc_msgForward) { + return nil; + } + deallocating = ! (*allowsWeakReference)(referent, @selector(allowsWeakReference)); - } + } - if (deallocating) { - if (crashIfDeallocating) { - _objc_fatal("Cannot form weak reference to instance (%p) of " - "class %s. It is possible that this object was " - "over-released, or is in the process of deallocation.", - (void*)referent, object_getClassName((id)referent)); - } else { - return nil; + if (deallocating) { + if (deallocatingOptions == CrashIfDeallocating) { + _objc_fatal("Cannot form weak reference to instance (%p) of " + "class %s. It is possible that this object was " + "over-released, or is in the process of deallocation.", + (void*)referent, object_getClassName((id)referent)); + } else { + return nil; + } } } diff --git a/runtime/objc.h b/runtime/objc.h index 6a73568..9e22d90 100644 --- a/runtime/objc.h +++ b/runtime/objc.h @@ -67,7 +67,7 @@ typedef id _Nullable (*IMP)(id _Nonnull, SEL _Nonnull, ...); # endif #else // __OBJC_BOOL_IS_BOOL not set. -# if TARGET_OS_OSX || TARGET_OS_IOSMAC || ((TARGET_OS_IOS || TARGET_OS_BRIDGE) && !__LP64__ && !__ARM_ARCH_7K) +# if TARGET_OS_OSX || TARGET_OS_MACCATALYST || ((TARGET_OS_IOS || TARGET_OS_BRIDGE) && !__LP64__ && !__ARM_ARCH_7K) # define OBJC_BOOL_IS_BOOL 0 # else # define OBJC_BOOL_IS_BOOL 1 @@ -180,8 +180,7 @@ OBJC_EXPORT const char * _Nonnull object_getClassName(id _Nullable obj) * @note In a garbage-collected environment, the memory is scanned conservatively. */ OBJC_EXPORT void * _Nullable object_getIndexedIvars(id _Nullable obj) - OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0) - OBJC_ARC_UNAVAILABLE; + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0, 2.0); /** * Identifies a selector as being valid or invalid. 
diff --git a/runtime/runtime.h b/runtime/runtime.h index c97129b..67145bd 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -1767,43 +1767,6 @@ OBJC_EXPORT void objc_setHook_getClass(objc_hook_getClass _Nonnull newValue, OBJC_AVAILABLE(10.14.4, 12.2, 12.2, 5.2, 3.2); #endif -/** - * Function type for a hook that assists objc_setAssociatedObject(). - * - * @param object The source object for the association. - * @param key The key for the association. - * @param value The value to associate with the key key for object. Pass nil to clear an existing association. - * @param policy The policy for the association. For possible values, see “Associative Object Behaviors.” - * - * @see objc_setAssociatedObject - * @see objc_setHook_setAssociatedObject - */ -typedef void (*objc_hook_setAssociatedObject)(id _Nonnull object, const void * _Nonnull key, - id _Nullable value, objc_AssociationPolicy policy); - -/** - * Install a hook for objc_setAssociatedObject(). - * - * @param newValue The hook function to install. - * @param outOldValue The address of a function pointer variable. On return, - * the old hook function is stored in the variable. - * - * @note The store to *outOldValue is thread-safe: the variable will be - * updated before objc_setAssociatedObject() calls your new hook to read it, - * even if your new hook is called from another thread before this - * setter completes. - * @note Your hook should always call the previous hook. - * - * @see objc_setAssociatedObject - * @see objc_hook_setAssociatedObject - */ -#if !(TARGET_OS_OSX && __i386__) -#define OBJC_SETASSOCIATEDOBJECTHOOK_DEFINED 1 -OBJC_EXPORT void objc_setHook_setAssociatedObject(objc_hook_setAssociatedObject _Nonnull newValue, - objc_hook_setAssociatedObject _Nullable * _Nonnull outOldValue) - OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0); -#endif - /** * Function type for a function that is called when an image is loaded. 
* @@ -1831,7 +1794,39 @@ typedef void (*objc_func_loadImage)(const struct mach_header * _Nonnull header); OBJC_EXPORT void objc_addLoadImageFunc(objc_func_loadImage _Nonnull func) OBJC_AVAILABLE(10.15, 13.0, 13.0, 6.0, 4.0); -/** +/** + * Function type for a hook that provides a name for lazily named classes. + * + * @param cls The class to generate a name for. + * @return The name of the class, or NULL if the name isn't known or can't be generated. + * + * @see objc_setHook_lazyClassNamer + */ +typedef const char * _Nullable (*objc_hook_lazyClassNamer)(_Nonnull Class cls); + +/** + * Install a hook to provide a name for lazily-named classes. + * + * @param newValue The hook function to install. + * @param outOldValue The address of a function pointer variable. On return, + * the old hook function is stored in the variable. + * + * @note The store to *outOldValue is thread-safe: the variable will be + * updated before objc_getClass() calls your new hook to read it, + * even if your new hook is called from another thread before this + * setter completes. + * @note Your hook must call the previous hook for class names + * that you do not recognize. + */ +#if !(TARGET_OS_OSX && __i386__) +#define OBJC_SETHOOK_LAZYCLASSNAMER_DEFINED 1 +OBJC_EXPORT +void objc_setHook_lazyClassNamer(_Nonnull objc_hook_lazyClassNamer newValue, + _Nonnull objc_hook_lazyClassNamer * _Nonnull oldOutValue) + OBJC_AVAILABLE(10.16, 14.0, 14.0, 7.0, 5.0); +#endif + +/** + * Callback from Objective-C to Swift to perform Swift class initialization. 
*/ #if !(TARGET_OS_OSX && __i386__) diff --git a/test/gcfiles/x86_64-nogc b/test/.DS_Store similarity index 62% rename from test/gcfiles/x86_64-nogc rename to test/.DS_Store index 4829b41..5008ddf 100644 Binary files a/test/gcfiles/x86_64-nogc and b/test/.DS_Store differ diff --git a/test/asm-placeholder.s b/test/asm-placeholder.S similarity index 100% rename from test/asm-placeholder.s rename to test/asm-placeholder.S diff --git a/test/association.m b/test/association.m index e148fc5..35f81f4 100644 --- a/test/association.m +++ b/test/association.m @@ -3,6 +3,8 @@ #include "test.h" #include #include +#include +#include static int values; static int supers; @@ -85,6 +87,100 @@ static const char *key = "key"; } @end +@interface Sub59318867: NSObject @end +@implementation Sub59318867 ++ (void)initialize { + objc_setAssociatedObject(self, &key, self, OBJC_ASSOCIATION_ASSIGN); +} +@end + +@interface CallOnDealloc: NSObject @end +@implementation CallOnDealloc { + void (^_block)(void); +} +- (id)initWithBlock: (void (^)(void))block { + _block = (__bridge id)Block_copy((__bridge void *)block); + return self; +} +- (void)dealloc { + _block(); + _Block_release((__bridge void *)_block); + SUPER_DEALLOC(); +} +@end + +void TestReleaseLater(void) { + int otherObjsCount = 100; + char keys1[otherObjsCount]; + char keys2[otherObjsCount]; + char laterKey; + + __block int normalDeallocs = 0; + __block int laterDeallocs = 0; + + { + id target = [NSObject new]; + for (int i = 0; i < otherObjsCount; i++) { + id value = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }]; + objc_setAssociatedObject(target, keys1 + i, value, OBJC_ASSOCIATION_RETAIN); + RELEASE_VALUE(value); + } + { + id laterValue = [[CallOnDealloc alloc] initWithBlock: ^{ + testassertequal(laterDeallocs, 0); + testassertequal(normalDeallocs, otherObjsCount * 2); + laterDeallocs++; + }]; + objc_setAssociatedObject(target, &laterKey, laterValue, (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN | 
_OBJC_ASSOCIATION_SYSTEM_OBJECT)); + RELEASE_VALUE(laterValue); + } + for (int i = 0; i < otherObjsCount; i++) { + id value = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }]; + objc_setAssociatedObject(target, keys2 + i, value, OBJC_ASSOCIATION_RETAIN); + RELEASE_VALUE(value); + } + RELEASE_VALUE(target); + } + testassertequal(laterDeallocs, 1); + testassertequal(normalDeallocs, otherObjsCount * 2); +} + +void TestReleaseLaterRemoveAssociations(void) { + + char normalKey; + char laterKey; + + __block int normalDeallocs = 0; + __block int laterDeallocs = 0; + + @autoreleasepool { + id target = [NSObject new]; + { + id normalValue = [[CallOnDealloc alloc] initWithBlock: ^{ normalDeallocs++; }]; + id laterValue = [[CallOnDealloc alloc] initWithBlock: ^{ laterDeallocs++; }]; + objc_setAssociatedObject(target, &normalKey, normalValue, OBJC_ASSOCIATION_RETAIN); + objc_setAssociatedObject(target, &laterKey, laterValue, (objc_AssociationPolicy)(OBJC_ASSOCIATION_RETAIN | _OBJC_ASSOCIATION_SYSTEM_OBJECT)); + RELEASE_VALUE(normalValue); + RELEASE_VALUE(laterValue); + } + testassertequal(normalDeallocs, 0); + testassertequal(laterDeallocs, 0); + + objc_removeAssociatedObjects(target); + testassertequal(normalDeallocs, 1); + testassertequal(laterDeallocs, 0); + + id normalValue = objc_getAssociatedObject(target, &normalKey); + id laterValue = objc_getAssociatedObject(target, &laterKey); + testassert(!normalValue); + testassert(laterValue); + + RELEASE_VALUE(target); + } + + testassertequal(normalDeallocs, 1); + testassertequal(laterDeallocs, 1); +} int main() { @@ -123,5 +219,13 @@ int main() objc_setAssociatedObject(nil, &key, nil, OBJC_ASSOCIATION_ASSIGN); #pragma clang diagnostic pop + // rdar://problem/59318867 Make sure we don't reenter the association lock + // when setting an associated object on an uninitialized class. 
+ Class Sub59318867Local = objc_getClass("Sub59318867"); + objc_setAssociatedObject(Sub59318867Local, &key, Sub59318867Local, OBJC_ASSOCIATION_ASSIGN); + + TestReleaseLater(); + TestReleaseLaterRemoveAssociations(); + succeed(__FILE__); } diff --git a/test/badPoolCompat-ios-tvos.m b/test/badPoolCompat-ios-tvos.m deleted file mode 100644 index 5f1b92c..0000000 --- a/test/badPoolCompat-ios-tvos.m +++ /dev/null @@ -1,14 +0,0 @@ -// Run test badPool as if it were built with an old SDK. - -// TEST_CONFIG MEM=mrc OS=iphoneos,iphonesimulator,appletvos,appletvsimulator -// TEST_CRASHES -// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 9.0 - -/* -TEST_RUN_OUTPUT -objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .* -OK: badPool.m -END -*/ - -#include "badPool.m" diff --git a/test/badPoolCompat-ios.m b/test/badPoolCompat-ios.m new file mode 100644 index 0000000..a5f684f --- /dev/null +++ b/test/badPoolCompat-ios.m @@ -0,0 +1,18 @@ +// Run test badPool as if it were built with an old SDK. + +// TEST_CONFIG MEM=mrc OS=iphoneos,iphonesimulator ARCH=x86_64,arm64 +// TEST_CRASHES +// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker ios -Xlinker 9.0 -Xlinker 9.0 -miphoneos-version-min=9.0 + +/* +TEST_BUILD_OUTPUT +ld: warning: passed two min versions.*for platform.* +END + +TEST_RUN_OUTPUT +objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .* +OK: badPool.m +END +*/ + +#include "badPool.m" diff --git a/test/badPoolCompat-macos.m b/test/badPoolCompat-macos.m index afd2117..1131c83 100644 --- a/test/badPoolCompat-macos.m +++ b/test/badPoolCompat-macos.m @@ -1,10 +1,14 @@ // Run test badPool as if it were built with an old SDK. 
-// TEST_CONFIG MEM=mrc OS=macosx +// TEST_CONFIG MEM=mrc OS=macosx ARCH=x86_64 // TEST_CRASHES -// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 10.11 +// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker macos -Xlinker 10.11 -Xlinker 10.11 -mmacosx-version-min=10.11 /* +TEST_BUILD_OUTPUT +ld: warning: passed two min versions.*for platform.* +END + TEST_RUN_OUTPUT objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .* OK: badPool.m diff --git a/test/badPoolCompat-tvos.m b/test/badPoolCompat-tvos.m new file mode 100644 index 0000000..3adfacd --- /dev/null +++ b/test/badPoolCompat-tvos.m @@ -0,0 +1,18 @@ +// Run test badPool as if it were built with an old SDK. + +// TEST_CONFIG MEM=mrc OS=appletvos,appletvsimulator ARCH=x86_64,arm64 +// TEST_CRASHES +// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker tvos -Xlinker 9.0 -Xlinker 9.0 -mtvos-version-min=9.0 + +/* +TEST_BUILD_OUTPUT +ld: warning: passed two min versions.*for platform.* +END + +TEST_RUN_OUTPUT +objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. Set a breakpoint .* Proceeding anyway .* +OK: badPool.m +END +*/ + +#include "badPool.m" diff --git a/test/badPoolCompat-watchos.m b/test/badPoolCompat-watchos.m index 6e89e44..19e8ca7 100644 --- a/test/badPoolCompat-watchos.m +++ b/test/badPoolCompat-watchos.m @@ -2,9 +2,13 @@ // TEST_CONFIG MEM=mrc OS=watchos,watchsimulator // TEST_CRASHES -// TEST_CFLAGS -DOLD=1 -Xlinker -sdk_version -Xlinker 2.0 +// TEST_CFLAGS -DOLD=1 -Xlinker -platform_version -Xlinker watchos -Xlinker 2.0 -Xlinker 2.0 -mwatchos-version-min=2.0 /* +TEST_BUILD_OUTPUT +ld: warning: passed two min versions.*for platform.* +END + TEST_RUN_OUTPUT objc\[\d+\]: Invalid or prematurely-freed autorelease pool 0x[0-9a-fA-f]+\. 
Set a breakpoint .* Proceeding anyway .* OK: badPool.m diff --git a/test/badSuperclass.m b/test/badSuperclass.m index 2fa0bc7..2ac22b5 100644 --- a/test/badSuperclass.m +++ b/test/badSuperclass.m @@ -26,7 +26,7 @@ int main() // Create a cycle in a superclass chain (Sub->supercls == Sub) // then attempt to walk that chain. Runtime should halt eventually. _objc_flush_caches(supercls); - ((Class *)(__bridge void *)subcls)[1] = subcls; + ((Class __ptrauth_objc_super_pointer *)(__bridge void *)subcls)[1] = subcls; #ifdef CACHE_FLUSH _objc_flush_caches(supercls); #else diff --git a/test/bigrc.m b/test/bigrc.m index 419bbb6..3918d8f 100644 --- a/test/bigrc.m +++ b/test/bigrc.m @@ -1,13 +1,4 @@ // TEST_CONFIG MEM=mrc -/* -TEST_RUN_OUTPUT -objc\[\d+\]: Deallocator object 0x[0-9a-fA-F]+ overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug -OK: bigrc.m -OR -no overrelease enforcement -OK: bigrc.m -END - */ #include "test.h" #include "testroot.i" @@ -20,37 +11,15 @@ static size_t LOTS; -(void)dealloc { id o = self; - size_t rc = 1; - testprintf("Retain a lot during dealloc\n"); + testprintf("Retain/release during dealloc\n"); - testassert(rc == 1); - testassert([o retainCount] == rc); - do { - [o retain]; - if (rc % 0x100000 == 0) testprintf("%zx/%zx ++\n", rc, LOTS); - } while (++rc < LOTS); - - testassert([o retainCount] == rc); - - do { - [o release]; - if (rc % 0x100000 == 0) testprintf("%zx/%zx --\n", rc, LOTS); - } while (--rc > 1); - - testassert(rc == 1); - testassert([o retainCount] == rc); - - - testprintf("Overrelease during dealloc\n"); - - // Not all architectures enforce this. 
-#if !SUPPORT_NONPOINTER_ISA - testwarn("no overrelease enforcement"); - fprintf(stderr, "no overrelease enforcement\n"); -#endif + testassertequal([o retainCount], 0); + [o retain]; + testassertequal([o retainCount], 0); [o release]; + testassertequal([o retainCount], 0); [super dealloc]; } diff --git a/test/bool.c b/test/bool.c index c12cc32..f112414 100644 --- a/test/bool.c +++ b/test/bool.c @@ -5,7 +5,11 @@ #include #if TARGET_OS_OSX -# define RealBool 0 +# if __x86_64__ +# define RealBool 0 +# else +# define RealBool 1 +# endif #elif TARGET_OS_IOS || TARGET_OS_BRIDGE # if (__arm__ && !__armv7k__) || __i386__ # define RealBool 0 diff --git a/test/cacheflush-constant.m b/test/cacheflush-constant.m new file mode 100644 index 0000000..94da6e2 --- /dev/null +++ b/test/cacheflush-constant.m @@ -0,0 +1,44 @@ +// TEST_CFLAGS -framework Foundation +/* +TEST_RUN_OUTPUT +foo +bar +bar +foo +END +*/ + +// NOTE: This test won't catch problems when running against a root, so it's of +// limited utility, but it would at least catch things when testing against the +// shared cache. + +#include +#include + +@interface NSBlock: NSObject @end + +// NSBlock is a conveniently accessible superclass that (currently) has a constant cache. 
+@interface MyBlock: NSBlock ++(void)foo; ++(void)bar; +@end +@implementation MyBlock ++(void)foo { + printf("foo\n"); +} ++(void)bar { + printf("bar\n"); +} +@end + +int main() { + [MyBlock foo]; + [MyBlock bar]; + + Method m1 = class_getClassMethod([MyBlock class], @selector(foo)); + Method m2 = class_getClassMethod([MyBlock class], @selector(bar)); + method_exchangeImplementations(m1, m2); + + [MyBlock foo]; + [MyBlock bar]; +} diff --git a/test/category.m b/test/category.m index 80795e2..334bc5f 100644 --- a/test/category.m +++ b/test/category.m @@ -135,25 +135,25 @@ asm( "l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2: \n" " .long 24 \n" " .long 1 \n" -" "PTR" L_catlist2MethodString \n" -" "PTR" L_catlist2MethodTypes \n" -" "PTR" _catlist2MethodImplementation"SIGNED_CATEGORY_IMP" \n" +" " PTR " L_catlist2MethodString \n" +" " PTR " L_catlist2MethodTypes \n" +" " PTR " _catlist2MethodImplementation" SIGNED_CATEGORY_IMP" \n" " .p2align 3 \n" "l_OBJC_$_CATEGORY_Super_$_Category_catlist2: \n" -" "PTR" L_catlist2CategoryName \n" -" "PTR" _OBJC_CLASS_$_Super \n" -" "PTR" l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2 \n" -" "PTR" 0 \n" -" "PTR" 0 \n" -" "PTR" 0 \n" -" "PTR" 0 \n" +" " PTR " L_catlist2CategoryName \n" +" " PTR " _OBJC_CLASS_$_Super \n" +" " PTR " l_OBJC_$_CATEGORY_INSTANCE_METHODS_Super_$_Category_catlist2 \n" +" " PTR " 0 \n" +" " PTR " 0 \n" +" " PTR " 0 \n" +" " PTR " 0 \n" " .long 64 \n" " .space 4 \n" " .section __DATA,__objc_catlist2 \n" " .p2align 3 \n" -" "PTR" l_OBJC_$_CATEGORY_Super_$_Category_catlist2 \n" +" " PTR " l_OBJC_$_CATEGORY_Super_$_Category_catlist2 \n" " .text \n" ); diff --git a/test/consolidatePoolPointers.m b/test/consolidatePoolPointers.m new file mode 100644 index 0000000..241df6f --- /dev/null +++ b/test/consolidatePoolPointers.m @@ -0,0 +1,142 @@ +//TEST_CONFIG MEM=mrc ARCH=x86_64,ARM64,ARM64e +//TEST_ENV OBJC_DISABLE_AUTORELEASE_COALESCING=NO OBJC_DISABLE_AUTORELEASE_COALESCING_LRU=NO + 
+#include "test.h" +#import +#include + +@interface Counter: NSObject { +@public + int retains; + int releases; + int autoreleases; +} +@end +@implementation Counter + +- (id)retain { + retains++; + return [super retain]; +} + +- (oneway void)release { + releases++; + [super release]; +} + +- (id)autorelease { + autoreleases++; + return [super autorelease]; +} + +- (void)dealloc { + testprintf("%p dealloc\n", self); + [super dealloc]; +} + +@end + +// Create a number of objects, autoreleasing each one a number of times in a +// round robin fashion. Verify that each object gets sent retain, release, and +// autorelease the correct number of times. Verify that the gap between +// autoreleasepool pointers is the given number of objects. Note: this will not +// work when the pool hits a page boundary, to be sure to stay under that limit. +void test(int objCount, int autoreleaseCount, int expectedGap) { + testprintf("Testing %d objects, %d autoreleases, expecting gap of %d\n", + objCount, autoreleaseCount, expectedGap); + + Counter *objs[objCount]; + for (int i = 0; i < objCount; i++) + objs[i] = [Counter new]; + + for (int j = 0; j < autoreleaseCount; j++) + for (int i = 0; i < objCount; i++) + [objs[i] retain]; + + for (int i = 0; i < objCount; i++) { + testassertequal(objs[i]->retains, autoreleaseCount); + testassertequal(objs[i]->releases, 0); + testassertequal(objs[i]->autoreleases, 0); + } + + void *outer = objc_autoreleasePoolPush(); + uintptr_t outerAddr = (uintptr_t)outer; + for (int j = 0; j < autoreleaseCount; j++) + for (int i = 0; i < objCount; i++) + [objs[i] autorelease]; + for (int i = 0; i < objCount; i++) { + testassertequal(objs[i]->retains, autoreleaseCount); + testassertequal(objs[i]->releases, 0); + testassertequal(objs[i]->autoreleases, autoreleaseCount); + } + + void *inner = objc_autoreleasePoolPush(); + uintptr_t innerAddr = (uintptr_t)inner; + testprintf("outer=%p inner=%p\n", outer, inner); + // Do one more autorelease in the inner pool to 
make sure we correctly + // handle pool boundaries. + for (int i = 0; i < objCount; i++) + [[objs[i] retain] autorelease]; + for (int i = 0; i < objCount; i++) { + testassertequal(objs[i]->retains, autoreleaseCount + 1); + testassertequal(objs[i]->releases, 0); + testassertequal(objs[i]->autoreleases, autoreleaseCount + 1); + } + + objc_autoreleasePoolPop(inner); + for (int i = 0; i < objCount; i++) { + testassertequal(objs[i]->retains, autoreleaseCount + 1); + testassertequal(objs[i]->releases, 1); + testassertequal(objs[i]->autoreleases, autoreleaseCount + 1); + } + + objc_autoreleasePoolPop(outer); + for (int i = 0; i < objCount; i++) { + testassertequal(objs[i]->retains, autoreleaseCount + 1); + testassertequal(objs[i]->releases, autoreleaseCount + 1); + testassertequal(objs[i]->autoreleases, autoreleaseCount + 1); + } + + intptr_t gap = innerAddr - outerAddr; + testprintf("gap=%ld\n", gap); + testassertequal(gap, expectedGap * sizeof(id)); + + // Destroy our test objects. + for (int i = 0; i < objCount; i++) + [objs[i] release]; +} + +int main() +{ + // Push a pool here so test() doesn't see a placeholder. 
+ objc_autoreleasePoolPush(); + + test(1, 1, 2); + test(1, 2, 2); + test(1, 10, 2); + test(1, 100, 2); + test(1, 70000, 3); + + test(2, 1, 3); + test(2, 2, 3); + test(2, 10, 3); + test(2, 100, 3); + test(2, 70000, 5); + + test(3, 1, 4); + test(3, 2, 4); + test(3, 10, 4); + test(3, 100, 4); + test(3, 70000, 7); + + test(4, 1, 5); + test(4, 2, 5); + test(4, 10, 5); + test(4, 100, 5); + test(4, 70000, 9); + + test(5, 1, 6); + test(5, 2, 11); + + succeed(__FILE__); +} diff --git a/test/customrr-nsobject.m b/test/customrr-nsobject.m index 912f414..f25e4ad 100644 --- a/test/customrr-nsobject.m +++ b/test/customrr-nsobject.m @@ -10,6 +10,8 @@ typedef IMP __ptrauth_objc_method_list_imp MethodListIMP; typedef IMP MethodListIMP; #endif +EXTERN_C void _method_setImplementationRawUnsafe(Method m, IMP imp); + static int Retains; static int Releases; static int Autoreleases; @@ -64,7 +66,7 @@ int main(int argc __unused, char **argv) #if SWIZZLE_AWZ method_setImplementation(meth, (IMP)HackAllocWithZone); #else - ((MethodListIMP *)meth)[2] = (IMP)HackAllocWithZone; + _method_setImplementationRawUnsafe(meth, (IMP)HackAllocWithZone); #endif meth = class_getClassMethod(cls, @selector(new)); @@ -72,7 +74,7 @@ int main(int argc __unused, char **argv) #if SWIZZLE_CORE method_setImplementation(meth, (IMP)HackPlusNew); #else - ((MethodListIMP *)meth)[2] = (IMP)HackPlusNew; + _method_setImplementationRawUnsafe(meth, (IMP)HackPlusNew); #endif meth = class_getClassMethod(cls, @selector(self)); @@ -80,7 +82,7 @@ int main(int argc __unused, char **argv) #if SWIZZLE_CORE method_setImplementation(meth, (IMP)HackPlusSelf); #else - ((MethodListIMP *)meth)[2] = (IMP)HackPlusSelf; + _method_setImplementationRawUnsafe(meth, (IMP)HackPlusSelf); #endif meth = class_getInstanceMethod(cls, @selector(self)); @@ -88,7 +90,7 @@ int main(int argc __unused, char **argv) #if SWIZZLE_CORE method_setImplementation(meth, (IMP)HackSelf); #else - ((MethodListIMP *)meth)[2] = (IMP)HackSelf; + 
_method_setImplementationRawUnsafe(meth, (IMP)HackSelf); #endif meth = class_getInstanceMethod(cls, @selector(release)); @@ -96,25 +98,25 @@ int main(int argc __unused, char **argv) #if SWIZZLE_RELEASE method_setImplementation(meth, (IMP)HackRelease); #else - ((MethodListIMP *)meth)[2] = (IMP)HackRelease; + _method_setImplementationRawUnsafe(meth, (IMP)HackRelease); #endif // These other methods get hacked for counting purposes only meth = class_getInstanceMethod(cls, @selector(retain)); RealRetain = (typeof(RealRetain))method_getImplementation(meth); - ((MethodListIMP *)meth)[2] = (IMP)HackRetain; + _method_setImplementationRawUnsafe(meth, (IMP)HackRetain); meth = class_getInstanceMethod(cls, @selector(autorelease)); RealAutorelease = (typeof(RealAutorelease))method_getImplementation(meth); - ((MethodListIMP *)meth)[2] = (IMP)HackAutorelease; + _method_setImplementationRawUnsafe(meth, (IMP)HackAutorelease); meth = class_getClassMethod(cls, @selector(alloc)); RealAlloc = (typeof(RealAlloc))method_getImplementation(meth); - ((MethodListIMP *)meth)[2] = (IMP)HackAlloc; + _method_setImplementationRawUnsafe(meth, (IMP)HackAlloc); meth = class_getInstanceMethod(cls, @selector(init)); - ((MethodListIMP *)meth)[2] = (IMP)HackInit; + _method_setImplementationRawUnsafe(meth, (IMP)HackInit); // Verify that the swizzles occurred before +initialize by provoking it now testassert(PlusInitializes == 0); diff --git a/test/customrr.m b/test/customrr.m index 4ebcece..633c260 100644 --- a/test/customrr.m +++ b/test/customrr.m @@ -191,38 +191,31 @@ int main(int argc __unused, char **argv) // Don't use runtime functions to do this - // we want the runtime to think that these are NSObject's real code { -#if __has_feature(ptrauth_calls) - typedef IMP __ptrauth_objc_method_list_imp MethodListIMP; -#else - typedef IMP MethodListIMP; -#endif - Class cls = [NSObject class]; IMP imp = class_getMethodImplementation(cls, @selector(retain)); - MethodListIMP *m = (MethodListIMP *) - 
class_getInstanceMethod(cls, @selector(retain)); - testassert(m[2] == imp); // verify Method struct is as we expect + Method m = class_getInstanceMethod(cls, @selector(retain)); + testassert(method_getImplementation(m) == imp); // verify Method struct is as we expect - m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(retain)); - m[2] = (IMP)HackRetain; - m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(release)); - m[2] = (IMP)HackRelease; - m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(autorelease)); - m[2] = (IMP)HackAutorelease; - m = (MethodListIMP *)class_getInstanceMethod(cls, @selector(retainCount)); - m[2] = (IMP)HackRetainCount; - m = (MethodListIMP *)class_getClassMethod(cls, @selector(retain)); - m[2] = (IMP)HackPlusRetain; - m = (MethodListIMP *)class_getClassMethod(cls, @selector(release)); - m[2] = (IMP)HackPlusRelease; - m = (MethodListIMP *)class_getClassMethod(cls, @selector(autorelease)); - m[2] = (IMP)HackPlusAutorelease; - m = (MethodListIMP *)class_getClassMethod(cls, @selector(retainCount)); - m[2] = (IMP)HackPlusRetainCount; - m = (MethodListIMP *)class_getClassMethod(cls, @selector(alloc)); - m[2] = (IMP)HackAlloc; - m = (MethodListIMP *)class_getClassMethod(cls, @selector(allocWithZone:)); - m[2] = (IMP)HackAllocWithZone; + m = class_getInstanceMethod(cls, @selector(retain)); + _method_setImplementationRawUnsafe(m, (IMP)HackRetain); + m = class_getInstanceMethod(cls, @selector(release)); + _method_setImplementationRawUnsafe(m, (IMP)HackRelease); + m = class_getInstanceMethod(cls, @selector(autorelease)); + _method_setImplementationRawUnsafe(m, (IMP)HackAutorelease); + m = class_getInstanceMethod(cls, @selector(retainCount)); + _method_setImplementationRawUnsafe(m, (IMP)HackRetainCount); + m = class_getClassMethod(cls, @selector(retain)); + _method_setImplementationRawUnsafe(m, (IMP)HackPlusRetain); + m = class_getClassMethod(cls, @selector(release)); + _method_setImplementationRawUnsafe(m, 
(IMP)HackPlusRelease); + m = class_getClassMethod(cls, @selector(autorelease)); + _method_setImplementationRawUnsafe(m, (IMP)HackPlusAutorelease); + m = class_getClassMethod(cls, @selector(retainCount)); + _method_setImplementationRawUnsafe(m, (IMP)HackPlusRetainCount); + m = class_getClassMethod(cls, @selector(alloc)); + _method_setImplementationRawUnsafe(m, (IMP)HackAlloc); + m = class_getClassMethod(cls, @selector(allocWithZone:)); + _method_setImplementationRawUnsafe(m, (IMP)HackAllocWithZone); _objc_flush_caches(cls); diff --git a/test/evil-class-def.m b/test/evil-class-def.m index c49bda8..066691a 100644 --- a/test/evil-class-def.m +++ b/test/evil-class-def.m @@ -12,8 +12,14 @@ #if __has_feature(ptrauth_calls) # define SIGNED_METHOD_LIST_IMP "@AUTH(ia,0,addr) " +# define SIGNED_METHOD_LIST "@AUTH(da,0xC310,addr) " +# define SIGNED_ISA "@AUTH(da, 0x6AE1, addr) " +# define SIGNED_SUPER "@AUTH(da, 0xB5AB, addr) " #else # define SIGNED_METHOD_LIST_IMP +# define SIGNED_METHOD_LIST +# define SIGNED_ISA +# define SIGNED_SUPER #endif #define str(x) #x @@ -25,15 +31,15 @@ void* nop(void* self) { return self; } __END_DECLS asm( - ".globl _OBJC_CLASS_$_Super \n" - ".section __DATA,__objc_data \n" - ".align 3 \n" - "_OBJC_CLASS_$_Super: \n" - PTR "_OBJC_METACLASS_$_Super \n" - PTR "0 \n" - PTR "__objc_empty_cache \n" - PTR "0 \n" - PTR "L_ro \n" + ".globl _OBJC_CLASS_$_Super \n" + ".section __DATA,__objc_data \n" + ".align 3 \n" + "_OBJC_CLASS_$_Super: \n" + PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n" + PTR "0 \n" + PTR "__objc_empty_cache \n" + PTR "0 \n" + PTR "L_ro \n" // pad to OBJC_MAX_CLASS_SIZE PTR "0 \n" PTR "0 \n" @@ -63,12 +69,12 @@ asm( PTR "0 \n" PTR "0 \n" "" - "_OBJC_METACLASS_$_Super: \n" - PTR "_OBJC_METACLASS_$_Super \n" - PTR "_OBJC_CLASS_$_Super \n" - PTR "__objc_empty_cache \n" - PTR "0 \n" - PTR "L_meta_ro \n" + "_OBJC_METACLASS_$_Super: \n" + PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n" + PTR "_OBJC_CLASS_$_Super" SIGNED_SUPER "\n" + PTR 
"__objc_empty_cache \n" + PTR "0 \n" + PTR "L_meta_ro \n" // pad to OBJC_MAX_CLASS_SIZE PTR "0 \n" PTR "0 \n" @@ -108,9 +114,9 @@ asm( PTR "0 \n" PTR "L_super_name \n" #if EVIL_SUPER - PTR "L_evil_methods \n" + PTR "L_evil_methods" SIGNED_METHOD_LIST "\n" #else - PTR "L_good_methods \n" + PTR "L_good_methods" SIGNED_METHOD_LIST "\n" #endif PTR "0 \n" PTR "L_super_ivars \n" @@ -127,24 +133,24 @@ asm( PTR "0 \n" PTR "L_super_name \n" #if EVIL_SUPER_META - PTR "L_evil_methods \n" + PTR "L_evil_methods" SIGNED_METHOD_LIST "\n" #else - PTR "L_good_methods \n" + PTR "L_good_methods" SIGNED_METHOD_LIST "\n" #endif PTR "0 \n" PTR "0 \n" PTR "0 \n" PTR "0 \n" - ".globl _OBJC_CLASS_$_Sub \n" - ".section __DATA,__objc_data \n" - ".align 3 \n" - "_OBJC_CLASS_$_Sub: \n" - PTR "_OBJC_METACLASS_$_Sub \n" - PTR "_OBJC_CLASS_$_Super \n" - PTR "__objc_empty_cache \n" - PTR "0 \n" - PTR "L_sub_ro \n" + ".globl _OBJC_CLASS_$_Sub \n" + ".section __DATA,__objc_data \n" + ".align 3 \n" + "_OBJC_CLASS_$_Sub: \n" + PTR "_OBJC_METACLASS_$_Sub" SIGNED_ISA "\n" + PTR "_OBJC_CLASS_$_Super" SIGNED_SUPER "\n" + PTR "__objc_empty_cache \n" + PTR "0 \n" + PTR "L_sub_ro \n" // pad to OBJC_MAX_CLASS_SIZE PTR "0 \n" PTR "0 \n" @@ -174,12 +180,12 @@ asm( PTR "0 \n" PTR "0 \n" "" - "_OBJC_METACLASS_$_Sub: \n" - PTR "_OBJC_METACLASS_$_Super \n" - PTR "_OBJC_METACLASS_$_Super \n" - PTR "__objc_empty_cache \n" - PTR "0 \n" - PTR "L_sub_meta_ro \n" + "_OBJC_METACLASS_$_Sub: \n" + PTR "_OBJC_METACLASS_$_Super" SIGNED_ISA "\n" + PTR "_OBJC_METACLASS_$_Super" SIGNED_SUPER "\n" + PTR "__objc_empty_cache \n" + PTR "0 \n" + PTR "L_sub_meta_ro \n" // pad to OBJC_MAX_CLASS_SIZE PTR "0 \n" PTR "0 \n" @@ -219,9 +225,9 @@ asm( PTR "0 \n" PTR "L_sub_name \n" #if EVIL_SUB - PTR "L_evil_methods \n" + PTR "L_evil_methods" SIGNED_METHOD_LIST "\n" #else - PTR "L_good_methods \n" + PTR "L_good_methods" SIGNED_METHOD_LIST "\n" #endif PTR "0 \n" PTR "L_sub_ivars \n" @@ -238,9 +244,9 @@ asm( PTR "0 \n" PTR "L_sub_name \n" #if 
EVIL_SUB_META - PTR "L_evil_methods \n" + PTR "L_evil_methods" SIGNED_METHOD_LIST "\n" #else - PTR "L_good_methods \n" + PTR "L_good_methods" SIGNED_METHOD_LIST "\n" #endif PTR "0 \n" PTR "0 \n" diff --git a/test/exchangeImp.m b/test/exchangeImp.m index da84f94..489c691 100644 --- a/test/exchangeImp.m +++ b/test/exchangeImp.m @@ -24,6 +24,9 @@ END #include static int state; +static int swizzleOld; +static int swizzleNew; +static int swizzleB; #define ONE 1 #define TWO 2 @@ -36,6 +39,13 @@ static int state; +(void) two { state = TWO; } +(void) length { state = LENGTH; } +(void) count { state = COUNT; } + +-(void) swizzleTarget { + swizzleOld++; +} +-(void) swizzleReplacement { + swizzleNew++; +} @end #define checkExchange(s1, v1, s2, v2) \ @@ -90,6 +100,42 @@ static int state; testassert(state == v2); \ } while (0) +@interface A : Super +@end +@implementation A +@end + +@interface B : Super +@end +@implementation B +- (void) swizzleTarget { + swizzleB++; +} +@end + +@interface C : Super +@end +@implementation C +- (void) hello { } +@end + +static IMP findInCache(Class cls, SEL sel) +{ + struct objc_imp_cache_entry *ents; + int count; + IMP ret = nil; + + ents = class_copyImpCache(cls, &count); + for (int i = 0; i < count; i++) { + if (ents[i].sel == sel) { + ret = ents[i].imp; + break; + } + } + free(ents); + return ret; +} + int main() { // Check ordinary selectors @@ -102,5 +148,66 @@ int main() checkExchange(count, COUNT, one, ONE); checkExchange(two, TWO, length, LENGTH); + Super *s = [Super new]; + A *a = [A new]; + B *b = [B new]; + C *c = [C new]; + + // cache swizzleTarget in Super, A and B + [s swizzleTarget]; + testassert(swizzleOld == 1); + testassert(swizzleNew == 0); + testassert(swizzleB == 0); + testassert(findInCache([Super class], @selector(swizzleTarget)) != nil); + + [a swizzleTarget]; + testassert(swizzleOld == 2); + testassert(swizzleNew == 0); + testassert(swizzleB == 0); + testassert(findInCache([A class], @selector(swizzleTarget)) != nil); + 
+ [b swizzleTarget]; + testassert(swizzleOld == 2); + testassert(swizzleNew == 0); + testassert(swizzleB == 1); + testassert(findInCache([B class], @selector(swizzleTarget)) != nil); + + // prime C's cache too + [c hello]; + testassert(findInCache([C class], @selector(hello)) != nil); + + Method m1 = class_getInstanceMethod([Super class], @selector(swizzleTarget)); + Method m2 = class_getInstanceMethod([Super class], @selector(swizzleReplacement)); + method_exchangeImplementations(m1, m2); + + // this should invalidate Super, A, but: + // - not B because it overrides - swizzleTarget and hence doesn't care + // - not C because it neither called swizzleTarget nor swizzleReplacement + testassert(findInCache([Super class], @selector(swizzleTarget)) == nil); + testassert(findInCache([A class], @selector(swizzleTarget)) == nil); + testassert(findInCache([B class], @selector(swizzleTarget)) != nil); + testassert(findInCache([C class], @selector(hello)) != nil); + + // now check that all lookups do the right thing + [s swizzleTarget]; + testassert(swizzleOld == 2); + testassert(swizzleNew == 1); + testassert(swizzleB == 1); + + [a swizzleTarget]; + testassert(swizzleOld == 2); + testassert(swizzleNew == 2); + testassert(swizzleB == 1); + + [b swizzleTarget]; + testassert(swizzleOld == 2); + testassert(swizzleNew == 2); + testassert(swizzleB == 2); + + [c swizzleTarget]; + testassert(swizzleOld == 2); + testassert(swizzleNew == 3); + testassert(swizzleB == 2); + succeed(__FILE__); } diff --git a/test/fakeRealizedClass.m b/test/fakeRealizedClass.m new file mode 100644 index 0000000..cec1c12 --- /dev/null +++ b/test/fakeRealizedClass.m @@ -0,0 +1,74 @@ +/* +Make sure we detect classes with the RW_REALIZED bit set in the binary. 
rdar://problem/67692760 +TEST_CONFIG OS=macosx +TEST_CRASHES +TEST_RUN_OUTPUT +objc\[\d+\]: realized class 0x[0-9a-fA-F]+ has corrupt data pointer 0x[0-9a-fA-F]+ +objc\[\d+\]: HALTED +END +*/ + +#include "test.h" + +#include + +#define RW_REALIZED (1U<<31) + +struct ObjCClass { + struct ObjCClass * __ptrauth_objc_isa_pointer isa; + struct ObjCClass * __ptrauth_objc_super_pointer superclass; + void *cachePtr; + uintptr_t zero; + uintptr_t data; +}; + +struct ObjCClass_ro { + uint32_t flags; + uint32_t instanceStart; + uint32_t instanceSize; +#ifdef __LP64__ + uint32_t reserved; +#endif + + union { + const uint8_t * ivarLayout; + struct ObjCClass * nonMetaClass; + }; + + const char * name; + struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList; + struct protocol_list_t * baseProtocols; + const struct ivar_list_t * ivars; + + const uint8_t * weakIvarLayout; + struct property_list_t *baseProperties; +}; + +extern struct ObjCClass OBJC_METACLASS_$_NSObject; +extern struct ObjCClass OBJC_CLASS_$_NSObject; + +struct ObjCClass_ro FakeSuperclassRO = { + .flags = RW_REALIZED +}; + +struct ObjCClass FakeSuperclass = { + &OBJC_METACLASS_$_NSObject, + &OBJC_METACLASS_$_NSObject, + NULL, + 0, + (uintptr_t)&FakeSuperclassRO +}; + +struct ObjCClass_ro FakeSubclassRO; + +struct ObjCClass FakeSubclass = { + &FakeSuperclass, + &FakeSuperclass, + NULL, + 0, + (uintptr_t)&FakeSubclassRO +}; + +static struct ObjCClass *class_ptr __attribute__((used)) __attribute((section("__DATA,__objc_nlclslist"))) = &FakeSubclass; + +int main() {} diff --git a/test/fakeRealizedClass2.m b/test/fakeRealizedClass2.m new file mode 100644 index 0000000..487c4d2 --- /dev/null +++ b/test/fakeRealizedClass2.m @@ -0,0 +1,74 @@ +/* +Variant on fakeRealizedClass which tests a fake class with no superclass rdar://problem/67692760 +TEST_CONFIG OS=macosx +TEST_CRASHES +TEST_RUN_OUTPUT +objc\[\d+\]: realized class 0x[0-9a-fA-F]+ has corrupt data pointer 0x[0-9a-fA-F]+ +objc\[\d+\]: HALTED +END 
+*/ + +#include "test.h" + +#include + +#define RW_REALIZED (1U<<31) + +struct ObjCClass { + struct ObjCClass * __ptrauth_objc_isa_pointer isa; + struct ObjCClass * __ptrauth_objc_super_pointer superclass; + void *cachePtr; + uintptr_t zero; + uintptr_t data; +}; + +struct ObjCClass_ro { + uint32_t flags; + uint32_t instanceStart; + uint32_t instanceSize; +#ifdef __LP64__ + uint32_t reserved; +#endif + + union { + const uint8_t * ivarLayout; + struct ObjCClass * nonMetaClass; + }; + + const char * name; + struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList; + struct protocol_list_t * baseProtocols; + const struct ivar_list_t * ivars; + + const uint8_t * weakIvarLayout; + struct property_list_t *baseProperties; +}; + +extern struct ObjCClass OBJC_METACLASS_$_NSObject; +extern struct ObjCClass OBJC_CLASS_$_NSObject; + +struct ObjCClass_ro FakeSuperclassRO = { + .flags = RW_REALIZED +}; + +struct ObjCClass FakeSuperclass = { + &OBJC_METACLASS_$_NSObject, + NULL, + NULL, + 0, + (uintptr_t)&FakeSuperclassRO +}; + +struct ObjCClass_ro FakeSubclassRO; + +struct ObjCClass FakeSubclass = { + &FakeSuperclass, + &FakeSuperclass, + NULL, + 0, + (uintptr_t)&FakeSubclassRO +}; + +static struct ObjCClass *class_ptr __attribute__((used)) __attribute((section("__DATA,__objc_nlclslist"))) = &FakeSubclass; + +int main() {} diff --git a/test/forward.m b/test/forward.m index 517f5e2..e1d133d 100644 --- a/test/forward.m +++ b/test/forward.m @@ -67,7 +67,7 @@ long long forward_handler(id self, SEL _cmd, long i1, long i2, long i3, long i4, # define p "w" // arm64_32 # endif void *struct_addr; - __asm__ volatile("mov %"p"0, "p"8" : "=r" (struct_addr) : : p"8"); + __asm__ volatile("mov %" p "0, " p "8" : "=r" (struct_addr) : : p "8"); #endif testassert(self == receiver); diff --git a/test/gc-main.m b/test/gc-main.m deleted file mode 100644 index 44f7476..0000000 --- a/test/gc-main.m +++ /dev/null @@ -1,10 +0,0 @@ -#include "test.h" - -OBJC_ROOT_CLASS -@interface Main 
@end -@implementation Main @end - -int main(int argc __attribute__((unused)), char **argv) -{ - succeed(basename(argv[0])); -} diff --git a/test/gc.c b/test/gc.c deleted file mode 100644 index dab0f7b..0000000 --- a/test/gc.c +++ /dev/null @@ -1 +0,0 @@ -int GC(void) { return 42; } diff --git a/test/gc.m b/test/gc.m deleted file mode 100644 index 65ba5f9..0000000 --- a/test/gc.m +++ /dev/null @@ -1,8 +0,0 @@ -#import - -OBJC_ROOT_CLASS -@interface GC @end -@implementation GC @end - -// silence "no debug symbols in executable" warning -void foo(void) { } diff --git a/test/gcenforcer-app-aso.m b/test/gcenforcer-app-aso.m deleted file mode 100644 index 8507a62..0000000 --- a/test/gcenforcer-app-aso.m +++ /dev/null @@ -1,12 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/$C{ARCH}-aso gcenforcer-app-aso.exe -END - -TEST_RUN_OUTPUT -.*No Info\.plist file in application bundle or no NSPrincipalClass in the Info\.plist file, exiting -END -*/ diff --git a/test/gcenforcer-app-gc.m b/test/gcenforcer-app-gc.m deleted file mode 100644 index a8ff65b..0000000 --- a/test/gcenforcer-app-gc.m +++ /dev/null @@ -1,14 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/$C{ARCH}-gc gcenforcer-app-gc.exe -END - -TEST_CRASHES -TEST_RUN_OUTPUT -objc\[\d+\]: Objective-C garbage collection is no longer supported\. -objc\[\d+\]: HALTED -END -*/ diff --git a/test/gcenforcer-app-gcaso.m b/test/gcenforcer-app-gcaso.m deleted file mode 100644 index 2094937..0000000 --- a/test/gcenforcer-app-gcaso.m +++ /dev/null @@ -1,14 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/$C{ARCH}-gcaso gcenforcer-app-gcaso.exe -END - -TEST_CRASHES -TEST_RUN_OUTPUT -objc\[\d+\]: Objective-C garbage collection is no longer supported\. 
-objc\[\d+\]: HALTED -END -*/ diff --git a/test/gcenforcer-app-gcaso2.m b/test/gcenforcer-app-gcaso2.m deleted file mode 100644 index 8231993..0000000 --- a/test/gcenforcer-app-gcaso2.m +++ /dev/null @@ -1,14 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/$C{ARCH}-gcaso2 gcenforcer-app-gcaso2.exe -END - -TEST_CRASHES -TEST_RUN_OUTPUT -objc\[\d+\]: Objective-C garbage collection is no longer supported\. -objc\[\d+\]: HALTED -END -*/ diff --git a/test/gcenforcer-app-gconly.m b/test/gcenforcer-app-gconly.m deleted file mode 100644 index 1b8e6a6..0000000 --- a/test/gcenforcer-app-gconly.m +++ /dev/null @@ -1,14 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/$C{ARCH}-gconly gcenforcer-app-gconly.exe -END - -TEST_CRASHES -TEST_RUN_OUTPUT -objc\[\d+\]: Objective-C garbage collection is no longer supported\. -objc\[\d+\]: HALTED -END -*/ diff --git a/test/gcenforcer-app-nogc.m b/test/gcenforcer-app-nogc.m deleted file mode 100644 index d99db0f..0000000 --- a/test/gcenforcer-app-nogc.m +++ /dev/null @@ -1,12 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/$C{ARCH}-nogc gcenforcer-app-nogc.exe -END - -TEST_RUN_OUTPUT -running -END -*/ diff --git a/test/gcenforcer-app-noobjc.m b/test/gcenforcer-app-noobjc.m deleted file mode 100644 index ad746c3..0000000 --- a/test/gcenforcer-app-noobjc.m +++ /dev/null @@ -1,12 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/$C{ARCH}-noobjc gcenforcer-app-noobjc.exe -END - -TEST_RUN_OUTPUT - -END -*/ diff --git a/test/gcenforcer-dylib-nogc.m b/test/gcenforcer-dylib-nogc.m deleted file mode 100644 index b10fbe1..0000000 --- a/test/gcenforcer-dylib-nogc.m +++ /dev/null @@ -1,11 +0,0 @@ -// gc-off app loading gc-off dylib: should work - -/* -fixme disabled in BATS because 
of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/libnogc.dylib . - $C{COMPILE} $DIR/gc-main.m -x none libnogc.dylib -o gcenforcer-dylib-nogc.exe -END -*/ diff --git a/test/gcenforcer-dylib-noobjc.m b/test/gcenforcer-dylib-noobjc.m deleted file mode 100644 index a06fa54..0000000 --- a/test/gcenforcer-dylib-noobjc.m +++ /dev/null @@ -1,9 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/libnoobjc.dylib . - $C{COMPILE} $DIR/gc-main.m -x none libnoobjc.dylib -o gcenforcer-dylib-noobjc.exe -END -*/ diff --git a/test/gcenforcer-dylib-requiresgc.m b/test/gcenforcer-dylib-requiresgc.m deleted file mode 100644 index 69a4d25..0000000 --- a/test/gcenforcer-dylib-requiresgc.m +++ /dev/null @@ -1,22 +0,0 @@ -// gc-off app loading gc-required dylib: should crash -// linker sees librequiresgc.fake.dylib, runtime uses librequiresgc.dylib - -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 -TEST_CRASHES - -TEST_RUN_OUTPUT -dyld: Library not loaded: librequiresgc\.dylib - Referenced from: .*gcenforcer-dylib-requiresgc.exe - Reason: no suitable image found\. Did find: - (.*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)+ - librequiresgc.dylib: cannot load 'librequiresgc\.dylib' because Objective-C garbage collection is not supported( - .*librequiresgc\.dylib: cannot load '.*librequiresgc\.dylib' because Objective-C garbage collection is not supported(\n)?)* -END - -TEST_BUILD - cp $DIR/gcfiles/librequiresgc.dylib . 
- $C{COMPILE} $DIR/gc-main.m -x none $DIR/gcfiles/librequiresgc.fake.dylib -o gcenforcer-dylib-requiresgc.exe -END -*/ diff --git a/test/gcenforcer-dylib-supportsgc.m b/test/gcenforcer-dylib-supportsgc.m deleted file mode 100644 index d8ce9e3..0000000 --- a/test/gcenforcer-dylib-supportsgc.m +++ /dev/null @@ -1,9 +0,0 @@ -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/libsupportsgc.dylib . - $C{COMPILE} $DIR/gc-main.m -x none libsupportsgc.dylib -o gcenforcer-dylib-supportsgc.exe -END -*/ diff --git a/test/gcenforcer-preflight.m b/test/gcenforcer-preflight.m deleted file mode 100644 index 828cc33..0000000 --- a/test/gcenforcer-preflight.m +++ /dev/null @@ -1,88 +0,0 @@ -#pragma clang diagnostic ignored "-Wcomment" -/* -fixme disabled in BATS because of gcfiles -TEST_CONFIG OS=macosx BATS=0 - -TEST_BUILD - cp $DIR/gcfiles/* . - $C{COMPILE} $DIR/gcenforcer-preflight.m -o gcenforcer-preflight.exe -END -*/ - -#include "test.h" -#include - -void check(int expected, const char *name) -{ - int fd = open(name, O_RDONLY); - testassert(fd >= 0); - - int result = objc_appRequiresGC(fd); - - close(fd); - testprintf("want %2d got %2d for %s\n", expected, result, name); - if (result != expected) { - fail("want %2d got %2d for %s\n", expected, result, name); - } - testassert(result == expected); -} - -int main() -{ - int i; - for (i = 0; i < 1000; i++) { - // dlopen_preflight - - testassert(dlopen_preflight("libsupportsgc.dylib")); - testassert(dlopen_preflight("libnoobjc.dylib")); - testassert(! 
dlopen_preflight("librequiresgc.dylib")); - testassert(dlopen_preflight("libnogc.dylib")); - - // objc_appRequiresGC - - // noobjc: no ObjC content - // nogc: ordinary not GC - // aso: trivial AppleScriptObjC wrapper that can run without GC - // gc: -fobjc-gc - // gconly: -fobjc-gc-only - // gcaso: non-trivial AppleScriptObjC with too many classrefs - // gcaso2: non-trivial AppleScriptObjC with too many class impls - - check(0, "x86_64-noobjc"); - check(0, "x86_64-nogc"); - check(0, "x86_64-aso"); - check(1, "x86_64-gc"); - check(1, "x86_64-gconly"); - check(1, "x86_64-gcaso"); - check(1, "x86_64-gcaso2"); - - check(0, "i386-noobjc"); - check(0, "i386-nogc"); - check(0, "i386-aso"); - check(1, "i386-gc"); - check(1, "i386-gconly"); - check(1, "i386-gcaso"); - check(1, "i386-gcaso2"); - - // fat files - check(0, "i386-aso--x86_64-aso"); - check(0, "i386-nogc--x86_64-nogc"); - check(1, "i386-gc--x86_64-gc"); - check(1, "i386-gc--x86_64-nogc"); - check(1, "i386-nogc--x86_64-gc"); - - // broken files - check(-1, "x86_64-broken"); - check(-1, "i386-broken"); - check(-1, "i386-broken--x86_64-gc"); - check(-1, "i386-broken--x86_64-nogc"); - check(-1, "i386-gc--x86_64-broken"); - check(-1, "i386-nogc--x86_64-broken"); - - // evil files - // evil1: claims to have 4 billion load commands of size 0 - check(-1, "evil1"); - } - - succeed(__FILE__); -} diff --git a/test/gcfiles/evil1 b/test/gcfiles/evil1 deleted file mode 100644 index 88bd337..0000000 Binary files a/test/gcfiles/evil1 and /dev/null differ diff --git a/test/gcfiles/i386-aso b/test/gcfiles/i386-aso deleted file mode 100644 index 4d13777..0000000 Binary files a/test/gcfiles/i386-aso and /dev/null differ diff --git a/test/gcfiles/i386-aso--x86_64-aso b/test/gcfiles/i386-aso--x86_64-aso deleted file mode 100644 index 93b3316..0000000 Binary files a/test/gcfiles/i386-aso--x86_64-aso and /dev/null differ diff --git a/test/gcfiles/i386-broken b/test/gcfiles/i386-broken deleted file mode 100644 index 98ab151..0000000 
Binary files a/test/gcfiles/i386-broken and /dev/null differ diff --git a/test/gcfiles/i386-broken--x86_64-gc b/test/gcfiles/i386-broken--x86_64-gc deleted file mode 100644 index b22be31..0000000 Binary files a/test/gcfiles/i386-broken--x86_64-gc and /dev/null differ diff --git a/test/gcfiles/i386-broken--x86_64-nogc b/test/gcfiles/i386-broken--x86_64-nogc deleted file mode 100644 index a401acc..0000000 Binary files a/test/gcfiles/i386-broken--x86_64-nogc and /dev/null differ diff --git a/test/gcfiles/i386-gc b/test/gcfiles/i386-gc deleted file mode 100644 index 15c0796..0000000 Binary files a/test/gcfiles/i386-gc and /dev/null differ diff --git a/test/gcfiles/i386-gc--x86_64-broken b/test/gcfiles/i386-gc--x86_64-broken deleted file mode 100644 index bfecbd3..0000000 Binary files a/test/gcfiles/i386-gc--x86_64-broken and /dev/null differ diff --git a/test/gcfiles/i386-gc--x86_64-gc b/test/gcfiles/i386-gc--x86_64-gc deleted file mode 100644 index a753540..0000000 Binary files a/test/gcfiles/i386-gc--x86_64-gc and /dev/null differ diff --git a/test/gcfiles/i386-gc--x86_64-nogc b/test/gcfiles/i386-gc--x86_64-nogc deleted file mode 100644 index 3a237e1..0000000 Binary files a/test/gcfiles/i386-gc--x86_64-nogc and /dev/null differ diff --git a/test/gcfiles/i386-gcaso b/test/gcfiles/i386-gcaso deleted file mode 100644 index 7c6cbf5..0000000 Binary files a/test/gcfiles/i386-gcaso and /dev/null differ diff --git a/test/gcfiles/i386-gcaso2 b/test/gcfiles/i386-gcaso2 deleted file mode 100644 index 5e55c61..0000000 Binary files a/test/gcfiles/i386-gcaso2 and /dev/null differ diff --git a/test/gcfiles/i386-gconly b/test/gcfiles/i386-gconly deleted file mode 100644 index 9102747..0000000 Binary files a/test/gcfiles/i386-gconly and /dev/null differ diff --git a/test/gcfiles/i386-nogc b/test/gcfiles/i386-nogc deleted file mode 100644 index 4349810..0000000 Binary files a/test/gcfiles/i386-nogc and /dev/null differ diff --git a/test/gcfiles/i386-nogc--x86_64-broken 
b/test/gcfiles/i386-nogc--x86_64-broken deleted file mode 100644 index 6570d39..0000000 Binary files a/test/gcfiles/i386-nogc--x86_64-broken and /dev/null differ diff --git a/test/gcfiles/i386-nogc--x86_64-gc b/test/gcfiles/i386-nogc--x86_64-gc deleted file mode 100644 index 3f3b82c..0000000 Binary files a/test/gcfiles/i386-nogc--x86_64-gc and /dev/null differ diff --git a/test/gcfiles/i386-nogc--x86_64-nogc b/test/gcfiles/i386-nogc--x86_64-nogc deleted file mode 100644 index 48dce6a..0000000 Binary files a/test/gcfiles/i386-nogc--x86_64-nogc and /dev/null differ diff --git a/test/gcfiles/i386-noobjc b/test/gcfiles/i386-noobjc deleted file mode 100644 index 8f01860..0000000 Binary files a/test/gcfiles/i386-noobjc and /dev/null differ diff --git a/test/gcfiles/libnogc.dylib b/test/gcfiles/libnogc.dylib deleted file mode 100644 index 110dfa2..0000000 Binary files a/test/gcfiles/libnogc.dylib and /dev/null differ diff --git a/test/gcfiles/libnoobjc.dylib b/test/gcfiles/libnoobjc.dylib deleted file mode 100644 index 56167e2..0000000 Binary files a/test/gcfiles/libnoobjc.dylib and /dev/null differ diff --git a/test/gcfiles/librequiresgc.dylib b/test/gcfiles/librequiresgc.dylib deleted file mode 100644 index b6080c7..0000000 Binary files a/test/gcfiles/librequiresgc.dylib and /dev/null differ diff --git a/test/gcfiles/librequiresgc.fake.dylib b/test/gcfiles/librequiresgc.fake.dylib deleted file mode 100644 index 04bfb35..0000000 Binary files a/test/gcfiles/librequiresgc.fake.dylib and /dev/null differ diff --git a/test/gcfiles/libsupportsgc.dylib b/test/gcfiles/libsupportsgc.dylib deleted file mode 100644 index 8a1efda..0000000 Binary files a/test/gcfiles/libsupportsgc.dylib and /dev/null differ diff --git a/test/gcfiles/x86_64-aso b/test/gcfiles/x86_64-aso deleted file mode 100644 index 695c191..0000000 Binary files a/test/gcfiles/x86_64-aso and /dev/null differ diff --git a/test/gcfiles/x86_64-broken b/test/gcfiles/x86_64-broken deleted file mode 100644 index 
4346f8a..0000000 Binary files a/test/gcfiles/x86_64-broken and /dev/null differ diff --git a/test/gcfiles/x86_64-gc b/test/gcfiles/x86_64-gc deleted file mode 100644 index 23246c2..0000000 Binary files a/test/gcfiles/x86_64-gc and /dev/null differ diff --git a/test/gcfiles/x86_64-gcaso b/test/gcfiles/x86_64-gcaso deleted file mode 100644 index 9a58c23..0000000 Binary files a/test/gcfiles/x86_64-gcaso and /dev/null differ diff --git a/test/gcfiles/x86_64-gcaso2 b/test/gcfiles/x86_64-gcaso2 deleted file mode 100644 index 3ac79a2..0000000 Binary files a/test/gcfiles/x86_64-gcaso2 and /dev/null differ diff --git a/test/gcfiles/x86_64-noobjc b/test/gcfiles/x86_64-noobjc deleted file mode 100644 index 7c8b050..0000000 Binary files a/test/gcfiles/x86_64-noobjc and /dev/null differ diff --git a/test/gdb.m b/test/gdb.m index e2c8b7d..22e0334 100644 --- a/test/gdb.m +++ b/test/gdb.m @@ -23,9 +23,31 @@ int main() [TestRoot class]; // Now class should be realized - result = (__bridge Class)(NXMapGet(gdb_objc_realized_classes, "TestRoot")); + if (!testdyld3()) { + // In dyld3 mode, the class will be in the launch closure and not in our table. 
+ result = (__bridge Class)(NXMapGet(gdb_objc_realized_classes, "TestRoot")); + testassert(result); + testassert(result == [TestRoot class]); + } + + Class dynamic = objc_allocateClassPair([TestRoot class], "Dynamic", 0); + objc_registerClassPair(dynamic); + result = (__bridge Class)(NXMapGet(gdb_objc_realized_classes, "Dynamic")); testassert(result); - testassert(result == [TestRoot class]); + testassert(result == dynamic); + + Class *realizedClasses = objc_copyRealizedClassList(NULL); + bool foundTestRoot = false; + bool foundDynamic = false; + for (Class *cursor = realizedClasses; *cursor; cursor++) { + if (*cursor == [TestRoot class]) + foundTestRoot = true; + if (*cursor == dynamic) + foundDynamic = true; + } + free(realizedClasses); + testassert(foundTestRoot); + testassert(foundDynamic); result = (__bridge Class)(NXMapGet(gdb_objc_realized_classes, "DoesNotExist")); testassert(!result); diff --git a/test/headers.sh b/test/headers.sh old mode 100644 new mode 100755 diff --git a/test/isaValidation.m b/test/isaValidation.m index bb26808..4d70b88 100644 --- a/test/isaValidation.m +++ b/test/isaValidation.m @@ -60,12 +60,8 @@ objc\[\d+\]: Attempt to use unknown class 0x[0-9a-f]+. objc\[\d+\]: HALTED Testing class_setIvarLayout objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'TestRoot' -objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'TestRoot' -objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'NSObject' objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'NSObject' objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'AllocatedTestClass2' -objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'AllocatedTestClass2' -objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'TestRoot' objc\[\d+\]: \*\*\* Can't set ivar layout for already-registered class 'DuplicateClass' Completed test on good classes. 
objc\[\d+\]: Attempt to use unknown class 0x[0-9a-f]+. @@ -156,7 +152,7 @@ struct TestCase TestCases[] = { TESTCASE(class_addProtocol(cls, @protocol(P))), TESTCASE(class_addProperty(cls, "x", NULL, 0)), TESTCASE(class_replaceProperty(cls, "x", NULL, 0)), - TESTCASE(class_setIvarLayout(cls, NULL)), + TESTCASE_NOMETA(class_setIvarLayout(cls, NULL)), TESTCASE(class_setWeakIvarLayout(cls, NULL)), TESTCASE_NOMETA(objc_registerClassPair(cls)), TESTCASE_NOMETA(objc_duplicateClass(cls, dupeName(cls), 0)), diff --git a/test/ivarSlide.m b/test/ivarSlide.m index dcc0bcc..a19e1e1 100644 --- a/test/ivarSlide.m +++ b/test/ivarSlide.m @@ -23,6 +23,14 @@ END #define ustrcmp(a, b) strcmp((char *)a, (char *)b) +// Aliasing-friendly way to read from a fixed offset in an object. +uintptr_t readWord(id obj, int offset) { + uintptr_t value; + char *ptr = (char *)(__bridge void*)obj; + memcpy(&value, ptr + offset * sizeof(uintptr_t), sizeof(uintptr_t)); + return value; +} + #ifdef __cplusplus class CXX { public: @@ -175,15 +183,14 @@ int main(int argc __attribute__((unused)), char **argv) static Sub * volatile sub; sub = [Sub new]; sub->subIvar = 10; - uintptr_t *subwords = (uintptr_t *)(__bridge void*)sub; - testassert(subwords[2] == 10); + testassertequal(readWord(sub, 2), 10); #ifdef __cplusplus - testassert(subwords[5] == 1); - testassert(sub->cxx.magic == 1); + testassertequal(readWord(sub, 5), 1); + testassertequal(sub->cxx.magic, 1); sub->cxx.magic++; - testassert(subwords[5] == 2); - testassert(sub->cxx.magic == 2); + testassertequal(readWord(sub, 5), 2); + testassertequal(sub->cxx.magic, 2); # if __has_feature(objc_arc) sub = nil; # else @@ -254,15 +261,14 @@ int main(int argc __attribute__((unused)), char **argv) */ Sub2 *sub2 = [Sub2 new]; - uintptr_t *sub2words = (uintptr_t *)(__bridge void*)sub2; sub2->subIvar = (void *)10; - testassert(sub2words[11] == 10); + testassertequal(readWord(sub2, 11), 10); - testassert(class_getInstanceSize([Sub2 class]) == 13*sizeof(void*)); + 
testassertequal(class_getInstanceSize([Sub2 class]), 13*sizeof(void*)); ivar = class_getInstanceVariable([Sub2 class], "subIvar"); testassert(ivar); - testassert(11*sizeof(void*) == (size_t)ivar_getOffset(ivar)); + testassertequal(11*sizeof(void*), (size_t)ivar_getOffset(ivar)); testassert(0 == strcmp(ivar_getName(ivar), "subIvar")); ivar = class_getInstanceVariable([ShrinkingSuper class], "superIvar"); diff --git a/test/lazyClassName.m b/test/lazyClassName.m new file mode 100644 index 0000000..264c20f --- /dev/null +++ b/test/lazyClassName.m @@ -0,0 +1,136 @@ +/* +TEST_RUN_OUTPUT +LazyClassName +LazyClassName2 +END +*/ + +#include "test.h" +#include "testroot.i" + +typedef const char * _Nullable (*objc_hook_lazyClassNamer)(_Nonnull Class); + +void objc_setHook_lazyClassNamer(_Nonnull objc_hook_lazyClassNamer newValue, + _Nonnull objc_hook_lazyClassNamer * _Nonnull oldOutValue); + +#define RW_COPIED_RO (1<<27) + +struct ObjCClass { + struct ObjCClass * __ptrauth_objc_isa_pointer isa; + struct ObjCClass * __ptrauth_objc_super_pointer superclass; + void *cachePtr; + uintptr_t zero; + uintptr_t data; +}; + +struct ObjCClass_ro { + uint32_t flags; + uint32_t instanceStart; + uint32_t instanceSize; +#ifdef __LP64__ + uint32_t reserved; +#endif + + union { + const uint8_t * ivarLayout; + struct ObjCClass * nonMetaClass; + }; + + const char * name; + struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList; + struct protocol_list_t * baseProtocols; + const struct ivar_list_t * ivars; + + const uint8_t * weakIvarLayout; + struct property_list_t *baseProperties; +}; + +extern struct ObjCClass OBJC_METACLASS_$_NSObject; +extern struct ObjCClass OBJC_CLASS_$_NSObject; + +extern struct ObjCClass LazyClassName; +extern struct ObjCClass LazyClassName2; + +struct ObjCClass_ro LazyClassNameMetaclass_ro = { + .flags = 1, + .instanceStart = 40, + .instanceSize = 40, + .nonMetaClass = &LazyClassName, +}; + +struct ObjCClass LazyClassNameMetaclass = { + .isa = 
&OBJC_METACLASS_$_NSObject, + .superclass = &OBJC_METACLASS_$_NSObject, + .cachePtr = &_objc_empty_cache, + .data = (uintptr_t)&LazyClassNameMetaclass_ro, +}; + +struct ObjCClass_ro LazyClassName_ro = { + .instanceStart = 8, + .instanceSize = 8, +}; + +struct ObjCClass LazyClassName = { + .isa = &LazyClassNameMetaclass, + .superclass = &OBJC_CLASS_$_NSObject, + .cachePtr = &_objc_empty_cache, + .data = (uintptr_t)&LazyClassName_ro + 2, +}; + +struct ObjCClass_ro LazyClassName2Metaclass_ro = { + .flags = 1, + .instanceStart = 40, + .instanceSize = 40, + .nonMetaClass = &LazyClassName2, +}; + +struct ObjCClass LazyClassName2Metaclass = { + .isa = &OBJC_METACLASS_$_NSObject, + .superclass = &OBJC_METACLASS_$_NSObject, + .cachePtr = &_objc_empty_cache, + .data = (uintptr_t)&LazyClassName2Metaclass_ro, +}; + +struct ObjCClass_ro LazyClassName2_ro = { + .instanceStart = 8, + .instanceSize = 8, +}; + +struct ObjCClass LazyClassName2 = { + .isa = &LazyClassName2Metaclass, + .superclass = &OBJC_CLASS_$_NSObject, + .cachePtr = &_objc_empty_cache, + .data = (uintptr_t)&LazyClassName2_ro + 2, +}; + +static objc_hook_lazyClassNamer OrigNamer; + +static const char *ClassNamer(Class cls) { + if (cls == (__bridge Class)&LazyClassName) + return "LazyClassName"; + return OrigNamer(cls); +} + +static objc_hook_lazyClassNamer OrigNamer2; + +static const char *ClassNamer2(Class cls) { + if (cls == (__bridge Class)&LazyClassName2) + return "LazyClassName2"; + return OrigNamer2(cls); +} + +__attribute__((section("__DATA,__objc_classlist,regular,no_dead_strip"))) +struct ObjCClass *LazyClassNamePtr = &LazyClassName; +__attribute__((section("__DATA,__objc_classlist,regular,no_dead_strip"))) +struct ObjCClass *LazyClassNamePtr2 = &LazyClassName2; + +int main() { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunguarded-availability" + objc_setHook_lazyClassNamer(ClassNamer, &OrigNamer); + objc_setHook_lazyClassNamer(ClassNamer2, &OrigNamer2); +#pragma clang diagnostic 
pop + + printf("%s\n", class_getName([(__bridge id)&LazyClassName class])); + printf("%s\n", class_getName([(__bridge id)&LazyClassName2 class])); +} diff --git a/test/libraryPath.c b/test/libraryPath.c new file mode 100644 index 0000000..af3151a --- /dev/null +++ b/test/libraryPath.c @@ -0,0 +1,55 @@ +// TEST_CFLAGS -lobjc + +#include "test.h" +#include + +// We use DYLD_LIBRARY_PATH to run the tests against a particular copy of +// libobjc. If this fails somehow (path is wrong, codesigning prevents loading, +// etc.) then the typical result is a silent failure and we end up testing +// /usr/lib/libobjc.A.dylib instead. This test detects when DYLD_LIBRARY_PATH is +// set but libobjc isn't loaded from it. +int main(int argc __unused, char **argv) { + char *containingDirectory = realpath(dirname(argv[0]), NULL); + testprintf("containingDirectory is %s\n", containingDirectory); + + char *dyldLibraryPath = getenv("DYLD_LIBRARY_PATH"); + testprintf("DYLD_LIBRARY_PATH is %s\n", dyldLibraryPath); + + if (dyldLibraryPath != NULL && strlen(dyldLibraryPath) > 0) { + int foundMatch = 0; + int foundNonMatch = 0; + + dyldLibraryPath = strdup(dyldLibraryPath); + + Dl_info info; + int success = dladdr((void *)objc_msgSend, &info); + testassert(success); + + testprintf("libobjc is located at %s\n", info.dli_fname); + + char *cursor = dyldLibraryPath; + char *path; + while ((path = strsep(&cursor, ":"))) { + char *resolved = realpath(path, NULL); + testprintf("Resolved %s to %s\n", path, resolved); + if (strcmp(resolved, containingDirectory) == 0) { + testprintf("This is equal to our containing directory, ignoring.\n"); + continue; + } + testprintf("Comparing %s and %s\n", resolved, info.dli_fname); + int comparison = strncmp(resolved, info.dli_fname, strlen(resolved)); + free(resolved); + if (comparison == 0) { + testprintf("Found a match!\n"); + foundMatch = 1; + break; + } else { + foundNonMatch = 1; + } + } + + testprintf("Finished searching, foundMatch=%d foundNonMatch=%d\n", 
foundMatch, foundNonMatch); + testassert(foundMatch || !foundNonMatch); + } + succeed(__FILE__); +} diff --git a/test/load-noobjc.m b/test/load-noobjc.m index 4dd9f86..c2be1a0 100644 --- a/test/load-noobjc.m +++ b/test/load-noobjc.m @@ -1,4 +1,8 @@ /* +dyld3 calls the load callback with its own internal lock held, which causes +this test to deadlock. Disable the test in dyld3 mode. If +rdar://problem/53769512 is fixed then remove this. +TEST_CONFIG DYLD=2 TEST_BUILD $C{COMPILE} $DIR/load-noobjc.m -o load-noobjc.exe $C{COMPILE} $DIR/load-noobjc2.m -o libload-noobjc2.dylib -bundle -bundle_loader load-noobjc.exe diff --git a/test/methodCacheLeaks.m b/test/methodCacheLeaks.m index 968bf5a..cb624c0 100644 --- a/test/methodCacheLeaks.m +++ b/test/methodCacheLeaks.m @@ -61,5 +61,10 @@ int main() exit(1); } wait4(pid, NULL, 0, NULL); - printf("objs=%p\n", objs); + + // Clean up. Otherwise leaks can end up seeing this as a leak, oddly enough. + for (int i = 0; i < classCount; i++) { + [objs[i] release]; + } + free(objs); } diff --git a/test/methodListSmall.h b/test/methodListSmall.h new file mode 100644 index 0000000..233e9c0 --- /dev/null +++ b/test/methodListSmall.h @@ -0,0 +1,236 @@ +#include "test.h" + +struct ObjCClass { + struct ObjCClass * __ptrauth_objc_isa_pointer isa; + struct ObjCClass * __ptrauth_objc_super_pointer superclass; + void *cachePtr; + uintptr_t zero; + struct ObjCClass_ro *data; +}; + +struct ObjCClass_ro { + uint32_t flags; + uint32_t instanceStart; + uint32_t instanceSize; +#ifdef __LP64__ + uint32_t reserved; +#endif + + const uint8_t * ivarLayout; + + const char * name; + struct ObjCMethodList * __ptrauth_objc_method_list_pointer baseMethodList; + struct protocol_list_t * baseProtocols; + const struct ivar_list_t * ivars; + + const uint8_t * weakIvarLayout; + struct property_list_t *baseProperties; +}; + +struct ObjCMethod { + char *name; + char *type; + IMP imp; +}; + +struct ObjCMethodList { + uint32_t sizeAndFlags; + uint32_t count; + struct 
ObjCMethod methods[]; +}; + +struct ObjCMethodSmall { + int32_t nameOffset; + int32_t typeOffset; + int32_t impOffset; +}; + +struct ObjCMethodListSmall { + uint32_t sizeAndFlags; + uint32_t count; + struct ObjCMethodSmall methods[]; +}; + + +extern struct ObjCClass OBJC_METACLASS_$_NSObject; +extern struct ObjCClass OBJC_CLASS_$_NSObject; + + +struct ObjCClass_ro FooMetaclass_ro = { + .flags = 1, + .instanceStart = 40, + .instanceSize = 40, + .name = "Foo", +}; + +struct ObjCClass FooMetaclass = { + .isa = &OBJC_METACLASS_$_NSObject, + .superclass = &OBJC_METACLASS_$_NSObject, + .cachePtr = &_objc_empty_cache, + .data = &FooMetaclass_ro, +}; + + +int ranMyMethod1; +extern "C" void myMethod1(id self __unused, SEL _cmd) { + testprintf("myMethod1\n"); + testassert(_cmd == @selector(myMethod1)); + ranMyMethod1 = 1; +} + +int ranMyMethod2; +extern "C" void myMethod2(id self __unused, SEL _cmd) { + testprintf("myMethod2\n"); + testassert(_cmd == @selector(myMethod2)); + ranMyMethod2 = 1; +} + +int ranMyMethod3; +extern "C" void myMethod3(id self __unused, SEL _cmd) { + testprintf("myMethod3\n"); + testassert(_cmd == @selector(myMethod3)); + ranMyMethod3 = 1; +} + +int ranMyReplacedMethod1; +extern "C" void myReplacedMethod1(id self __unused, SEL _cmd) { + testprintf("myReplacedMethod1\n"); + testassert(_cmd == @selector(myMethod1)); + ranMyReplacedMethod1 = 1; +} + +int ranMyReplacedMethod2; +extern "C" void myReplacedMethod2(id self __unused, SEL _cmd) { + testprintf("myReplacedMethod2\n"); + testassert(_cmd == @selector(myMethod2)); + ranMyReplacedMethod2 = 1; +} + +struct BigStruct { + uintptr_t a, b, c, d, e, f, g; +}; + +int ranMyMethodStret; +extern "C" BigStruct myMethodStret(id self __unused, SEL _cmd) { + testprintf("myMethodStret\n"); + testassert(_cmd == @selector(myMethodStret)); + ranMyMethodStret = 1; + BigStruct ret = {}; + return ret; +} + +int ranMyReplacedMethodStret; +extern "C" BigStruct myReplacedMethodStret(id self __unused, SEL _cmd) { + 
testprintf("myReplacedMethodStret\n"); + testassert(_cmd == @selector(myMethodStret)); + ranMyReplacedMethodStret = 1; + BigStruct ret = {}; + return ret; +} + +extern struct ObjCMethodList Foo_methodlistSmall; + +asm(R"ASM( +.section __TEXT,__cstring +_MyMethod1Name: + .asciz "myMethod1" +_MyMethod2Name: + .asciz "myMethod2" +_MyMethod3Name: + .asciz "myMethod3" +_BoringMethodType: + .asciz "v16@0:8" +_MyMethodStretName: + .asciz "myMethodStret" +_MyMethodNullTypesName: + .asciz "myMethodNullTypes" +_StretType: + .asciz "{BigStruct=QQQQQQQ}16@0:8" +)ASM"); + +#if __LP64__ +asm(R"ASM( +.section __DATA,__objc_selrefs,literal_pointers,no_dead_strip +_MyMethod1NameRef: + .quad _MyMethod1Name +_MyMethod2NameRef: + .quad _MyMethod2Name +_MyMethod3NameRef: + .quad _MyMethod3Name +_MyMethodStretNameRef: + .quad _MyMethodStretName +_MyMethodNullTypesNameRef: + .quad _MyMethodNullTypesName +)ASM"); +#else +asm(R"ASM( +.section __DATA,__objc_selrefs,literal_pointers,no_dead_strip +_MyMethod1NameRef: + .long _MyMethod1Name +_MyMethod2NameRef: + .long _MyMethod2Name +_MyMethod3NameRef: + .long _MyMethod3Name +_MyMethodStretNameRef: + .long _MyMethodStretName +_MyMethodNullTypesNameRef: + .long _MyMethodNullTypesName +)ASM"); +#endif + +#if MUTABLE_METHOD_LIST +asm(".section __DATA,__objc_methlist\n"); +#else +asm(".section __TEXT,__objc_methlist\n"); +#endif + +asm(R"ASM( + .p2align 2 +_Foo_methodlistSmall: + .long 12 | 0x80000000 + .long 5 + + .long _MyMethod1NameRef - . + .long _BoringMethodType - . + .long _myMethod1 - . + + .long _MyMethod2NameRef - . + .long _BoringMethodType - . + .long _myMethod2 - . + + .long _MyMethod3NameRef - . + .long _BoringMethodType - . + .long _myMethod3 - . + + .long _MyMethodStretNameRef - . + .long _StretType - . + .long _myMethodStret - . + + .long _MyMethodNullTypesNameRef - . + .long 0 + .long _myMethod1 - . 
+)ASM"); + +struct ObjCClass_ro Foo_ro = { + .instanceStart = 8, + .instanceSize = 8, + .name = "Foo", + .baseMethodList = &Foo_methodlistSmall, +}; + +struct ObjCClass FooClass = { + .isa = &FooMetaclass, + .superclass = &OBJC_CLASS_$_NSObject, + .cachePtr = &_objc_empty_cache, + .data = &Foo_ro, +}; + + +@interface Foo: NSObject + +- (void)myMethod1; +- (void)myMethod2; +- (void)myMethod3; +- (BigStruct)myMethodStret; + +@end diff --git a/test/methodListSmall.mm b/test/methodListSmall.mm new file mode 100644 index 0000000..82f157a --- /dev/null +++ b/test/methodListSmall.mm @@ -0,0 +1,95 @@ +// TEST_CFLAGS -std=c++11 + +#include "methodListSmall.h" + +void testClass(Class c) { + id foo = [c new]; + [foo myMethod1]; + testassert(ranMyMethod1); + [foo myMethod2]; + testassert(ranMyMethod2); + [foo myMethod3]; + testassert(ranMyMethod3); + + Method m1 = class_getInstanceMethod(c, @selector(myMethod1)); + testassert(m1); + testassert(method_getName(m1) == @selector(myMethod1)); + testassert(strcmp(method_getTypeEncoding(m1), "v16@0:8") == 0); + testassert(method_getImplementation(m1) == (IMP)myMethod1); + + method_setImplementation(m1, (IMP)myReplacedMethod1); + testassert(method_getImplementation(m1) == (IMP)myReplacedMethod1); + [foo myMethod1]; + testassert(ranMyReplacedMethod1); + + Method m2 = class_getInstanceMethod(c, @selector(myMethod2)); + auto method_invoke_cast = (void (*)(id, Method))method_invoke; + + ranMyMethod2 = 0; + method_invoke_cast(foo, m2); + testassert(ranMyMethod2); + + method_setImplementation(m2, (IMP)myReplacedMethod2); + method_invoke_cast(foo, m2); + testassert(ranMyReplacedMethod2); + + Method mstret = class_getInstanceMethod(c, @selector(myMethodStret)); +#if __arm64__ + // No _stret variant on ARM64. We'll test struct return through + // method_invoke anyway just to be thorough. 
+ auto method_invoke_stret_cast = (BigStruct (*)(id, Method))method_invoke; +#else + auto method_invoke_stret_cast = (BigStruct (*)(id, Method))method_invoke_stret; +#endif + + [foo myMethodStret]; + testassert(ranMyMethodStret); + + ranMyMethodStret = 0; + method_invoke_stret_cast(foo, mstret); + testassert(ranMyMethodStret); + + method_setImplementation(mstret, (IMP)myReplacedMethodStret); + [foo myMethodStret]; + testassert(ranMyReplacedMethodStret); + + ranMyReplacedMethodStret = 0; + method_invoke_stret_cast(foo, mstret); + testassert(ranMyReplacedMethodStret); + + auto *desc1 = method_getDescription(m1); + testassert(desc1->name == @selector(myMethod1)); + testassert(desc1->types == method_getTypeEncoding(m1)); + + auto *desc2 = method_getDescription(m2); + testassert(desc2->name == @selector(myMethod2)); + testassert(desc2->types == method_getTypeEncoding(m2)); + + auto *descstret = method_getDescription(mstret); + testassert(descstret->name == @selector(myMethodStret)); + testassert(descstret->types == method_getTypeEncoding(mstret)); + + Method nullTypeMethod = class_getInstanceMethod(c, @selector(myMethodNullTypes)); + testassert(nullTypeMethod); + testassert(method_getName(nullTypeMethod) == @selector(myMethodNullTypes)); + testassertequal(method_getTypeEncoding(nullTypeMethod), NULL); + testassertequal(method_getImplementation(nullTypeMethod), (IMP)myMethod1); +} + +int main() { + Class fooClass = (__bridge Class)&FooClass; + + // Make sure this class can be duplicated and works as expected. + // Duplicate it before testClass mucks around with the methods. + // Need to realize fooClass before duplicating it, hence the + // class message. 
+ Class dupedClass = objc_duplicateClass([fooClass class], "FooDup", 0); + + testprintf("Testing class.\n"); + testClass(fooClass); + + testprintf("Testing duplicate class.\n"); + testClass(dupedClass); + + succeed(__FILE__); +} diff --git a/test/nonpointerisa.m b/test/nonpointerisa.m index dbe222c..659aed5 100644 --- a/test/nonpointerisa.m +++ b/test/nonpointerisa.m @@ -14,7 +14,9 @@ # if __x86_64__ # define RC_ONE (1ULL<<56) # elif __arm64__ && __LP64__ -# define RC_ONE (1ULL<<45) +// Quiet the warning about redefining the macro from isa.h. +# undef RC_ONE +# define RC_ONE (objc_debug_isa_magic_value == 1 ? 1ULL<<56 : 1ULL<<45) # elif __ARM_ARCH_7K__ >= 2 || (__arm64__ && !__LP64__) # define RC_ONE (1ULL<<25) # else @@ -29,9 +31,9 @@ void check_raw_pointer(id obj, Class cls) testassert(!NONPOINTER(obj)); uintptr_t isa = ISA(obj); - testassert((Class)isa == cls); - testassert((Class)(isa & objc_debug_isa_class_mask) == cls); - testassert((Class)(isa & ~objc_debug_isa_class_mask) == 0); + testassertequal(ptrauth_strip((void *)isa, ptrauth_key_process_independent_data), (void *)cls); + testassertequal((Class)(isa & objc_debug_isa_class_mask), cls); + testassertequal(ptrauth_strip((void *)(isa & ~objc_debug_isa_class_mask), ptrauth_key_process_independent_data), 0); CFRetain(obj); testassert(ISA(obj) == isa); @@ -80,44 +82,40 @@ int main() void check_nonpointer(id obj, Class cls) { - testassert(object_getClass(obj) == cls); + testassertequal(object_getClass(obj), cls); testassert(NONPOINTER(obj)); uintptr_t isa = ISA(obj); if (objc_debug_indexed_isa_magic_mask != 0) { // Indexed isa. 
- testassert((isa & objc_debug_indexed_isa_magic_mask) == objc_debug_indexed_isa_magic_value); + testassertequal((isa & objc_debug_indexed_isa_magic_mask), objc_debug_indexed_isa_magic_value); testassert((isa & ~objc_debug_indexed_isa_index_mask) != 0); uintptr_t index = (isa & objc_debug_indexed_isa_index_mask) >> objc_debug_indexed_isa_index_shift; testassert(index < objc_indexed_classes_count); - testassert(objc_indexed_classes[index] == cls); + testassertequal(objc_indexed_classes[index], cls); } else { // Packed isa. - testassert((Class)(isa & objc_debug_isa_class_mask) == cls); + testassertequal((Class)(isa & objc_debug_isa_class_mask), cls); testassert((Class)(isa & ~objc_debug_isa_class_mask) != 0); - testassert((isa & objc_debug_isa_magic_mask) == objc_debug_isa_magic_value); + testassertequal((isa & objc_debug_isa_magic_mask), objc_debug_isa_magic_value); } CFRetain(obj); - testassert(ISA(obj) == isa + RC_ONE); - testassert([obj retainCount] == 2); + testassertequal(ISA(obj), isa + RC_ONE); + testassertequal([obj retainCount], 2); [obj retain]; - testassert(ISA(obj) == isa + RC_ONE*2); - testassert([obj retainCount] == 3); + testassertequal(ISA(obj), isa + RC_ONE*2); + testassertequal([obj retainCount], 3); CFRelease(obj); - testassert(ISA(obj) == isa + RC_ONE); - testassert([obj retainCount] == 2); + testassertequal(ISA(obj), isa + RC_ONE); + testassertequal([obj retainCount], 2); [obj release]; - testassert(ISA(obj) == isa); - testassert([obj retainCount] == 1); + testassertequal(ISA(obj), isa); + testassertequal([obj retainCount], 1); } -@interface OS_object -+(id)alloc; -@end - @interface Fake_OS_object : NSObject { int refcnt; int xref_cnt; @@ -138,7 +136,7 @@ void check_nonpointer(id obj, Class cls) } @end -@interface Sub_OS_object : OS_object @end +@interface Sub_OS_object : NSObject @end @implementation Sub_OS_object @end @@ -147,20 +145,30 @@ void check_nonpointer(id obj, Class cls) int main() { + Class OS_object = objc_getClass("OS_object"); + 
class_setSuperclass([Sub_OS_object class], OS_object); + uintptr_t isa; #if SUPPORT_PACKED_ISA # if !OBJC_HAVE_NONPOINTER_ISA || !OBJC_HAVE_PACKED_NONPOINTER_ISA || OBJC_HAVE_INDEXED_NONPOINTER_ISA # error wrong # endif - testassert(objc_debug_isa_class_mask == (uintptr_t)&objc_absolute_packed_isa_class_mask); + void *absoluteMask = (void *)&objc_absolute_packed_isa_class_mask; +#if __has_feature(ptrauth_calls) + absoluteMask = ptrauth_strip(absoluteMask, ptrauth_key_process_independent_data); +#endif + // absoluteMask should "cover" objc_debug_isa_class_mask + testassert((objc_debug_isa_class_mask & (uintptr_t)absoluteMask) == objc_debug_isa_class_mask); + // absoluteMask should only possibly differ in the high bits + testassert((objc_debug_isa_class_mask & 0xffff) == ((uintptr_t)absoluteMask & 0xffff)); // Indexed isa variables DO NOT exist on packed-isa platforms testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_magic_mask")); testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_magic_value")); testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_index_mask")); testassert(!dlsym(RTLD_DEFAULT, "objc_absolute_indexed_isa_index_shift")); - + #elif SUPPORT_INDEXED_ISA # if !OBJC_HAVE_NONPOINTER_ISA || OBJC_HAVE_PACKED_NONPOINTER_ISA || !OBJC_HAVE_INDEXED_NONPOINTER_ISA # error wrong @@ -176,7 +184,7 @@ int main() #else # error unknown nonpointer isa format #endif - + testprintf("Isa with index\n"); id index_o = [Fake_OS_object new]; check_nonpointer(index_o, [Fake_OS_object class]); @@ -193,7 +201,6 @@ int main() objc_setAssociatedObject(index_o, assoc, assoc, OBJC_ASSOCIATION_ASSIGN); testassert(__builtin_popcountl(isa ^ ISA(index_o)) == 1); - testprintf("Isa without index\n"); id raw_o = [OS_object alloc]; check_raw_pointer(raw_o, [OS_object class]); diff --git a/test/preopt-caches.entitlements b/test/preopt-caches.entitlements new file mode 100644 index 0000000..bc4acf2 --- /dev/null +++ b/test/preopt-caches.entitlements @@ -0,0 +1,12 @@ + + 
+ + + com.apple.springboard-ui.client + + com.apple.security.system-groups + + systemgroup.com.apple.powerlog + + + diff --git a/test/preopt-caches.mm b/test/preopt-caches.mm new file mode 100644 index 0000000..7aec275 --- /dev/null +++ b/test/preopt-caches.mm @@ -0,0 +1,380 @@ +/* +TEST_ENTITLEMENTS preopt-caches.entitlements +TEST_CONFIG OS=iphoneos MEM=mrc +TEST_BUILD + mkdir -p $T{OBJDIR} + /usr/sbin/dtrace -h -s $DIR/../runtime/objc-probes.d -o $T{OBJDIR}/objc-probes.h + $C{COMPILE} $DIR/preopt-caches.mm -std=gnu++17 -isystem $C{SDK_PATH}/System/Library/Frameworks/System.framework/PrivateHeaders -I$T{OBJDIR} -ldsc -o preopt-caches.exe +END +*/ +// +// check_preopt_caches.m +// check-preopt-caches +// +// Created by Thomas Deniau on 11/06/2020. +// + +#define TEST_CALLS_OPERATOR_NEW + +#include "test-defines.h" +#include "../runtime/objc-private.h" +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +int validate_dylib_in_forked_process(const char * const toolPath, const char * const dylib) +{ + int out_pipe[2] = {-1}; + int err_pipe[2] = {-1}; + int exit_code = -1; + pid_t pid = 0; + int rval = 0; + + std::string child_stdout; + std::string child_stderr; + + posix_spawn_file_actions_t actions = NULL; + const char * const args[] = {toolPath, dylib, NULL}; + int ret = 0; + + if (pipe(out_pipe)) { + exit(3); + } + + if (pipe(err_pipe)) { + exit(3); + } + + //Do-si-do the FDs + posix_spawn_file_actions_init(&actions); + posix_spawn_file_actions_addclose(&actions, out_pipe[0]); + posix_spawn_file_actions_addclose(&actions, err_pipe[0]); + posix_spawn_file_actions_adddup2(&actions, out_pipe[1], 1); + posix_spawn_file_actions_adddup2(&actions, err_pipe[1], 2); + posix_spawn_file_actions_addclose(&actions, out_pipe[1]); + posix_spawn_file_actions_addclose(&actions, err_pipe[1]); + + // Fork so that we can dlopen the dylib in a clean 
context + ret = posix_spawnp(&pid, args[0], &actions, NULL, (char * const *)args, NULL); + + if (ret != 0) { + fail("posix_spawn for %s failed: returned %d, %s\n", dylib, ret, strerror(ret)); + exit(3); + } + + posix_spawn_file_actions_destroy(&actions); + close(out_pipe[1]); + close(err_pipe[1]); + + std::string buffer(4096,' '); + std::vector plist = { {out_pipe[0],POLLIN,0}, {err_pipe[0],POLLIN,0} }; + while (( (rval = poll(&plist[0],(nfds_t)plist.size(), 100000)) > 0 ) || ((rval < 0) && (errno == EINTR))) { + if (rval < 0) { + // EINTR + continue; + } + + ssize_t bytes_read = 0; + + if (plist[0].revents&(POLLERR|POLLHUP) || plist[1].revents&(POLLERR|POLLHUP)) { + bytes_read = read(out_pipe[0], &buffer[0], buffer.length()); + bytes_read = read(err_pipe[0], &buffer[0], buffer.length()); + break; + } + + if (plist[0].revents&POLLIN) { + bytes_read = read(out_pipe[0], &buffer[0], buffer.length()); + child_stdout += buffer.substr(0, static_cast(bytes_read)); + } + else if ( plist[1].revents&POLLIN ) { + bytes_read = read(err_pipe[0], &buffer[0], buffer.length()); + child_stderr += buffer.substr(0, static_cast(bytes_read)); + } + else break; // nothing left to read + + plist[0].revents = 0; + plist[1].revents = 0; + } + if (rval == 0) { + // Early timeout so try to clean up. + fail("Failed to validate dylib %s: timeout!\n", dylib); + return 1; + } + + + if (err_pipe[0] != -1) { + close(err_pipe[0]); + } + + if (out_pipe[0] != -1) { + close(out_pipe[0]); + } + + if (pid != 0) { + if (waitpid(pid, &exit_code, 0) < 0) { + fail("Could not wait for PID %d (dylib %s): err %s\n", pid, dylib, strerror(errno)); + } + + if (!WIFEXITED(exit_code)) { + fail("PID %d (%s) did not exit: %d. 
stdout: %s\n stderr: %s\n", pid, dylib, exit_code, child_stdout.c_str(), child_stderr.c_str()); + } + if (WEXITSTATUS(exit_code) != 0) { + fail("Failed to validate dylib %s\nstdout: %s\nstderr: %s\n", dylib, child_stdout.c_str(), child_stderr.c_str()); + } + } + + testprintf("%s", child_stdout.c_str()); + + return 0; +} + +bool check_class(Class cls, unsigned & cacheCount) { + // printf("%s %s\n", class_getName(cls), class_isMetaClass(cls) ? "(metaclass)" : ""); + + // For the initialization of the cache so that we setup the constant cache if any + class_getMethodImplementation(cls, @selector(initialize)); + + if (objc_cache_isConstantOptimizedCache(&(cls->cache), true, (uintptr_t)&_objc_empty_cache)) { + cacheCount++; + // printf("%s has a preopt cache\n", class_getName(cls)); + + // Make the union of all selectors until the preopt fallback class + const class_ro_t * fallback = ((const objc_class *) objc_cache_preoptFallbackClass(&(cls->cache)))->data()->ro(); + + std::unordered_map methods; + + Method *methodList; + unsigned count; + Class currentClass = cls; + unsigned dynamicCount = 0; + while (currentClass->data()->ro() != fallback) { + methodList = class_copyMethodList(currentClass, &count); + // printf("%d methods in method list for %s\n", count, class_getName(currentClass)); + for (unsigned i = 0 ; i < count ; i++) { + SEL sel = method_getName(methodList[i]); + if (methods.find(sel) == methods.end()) { + const char *name = sel_getName(sel); + // printf("[dynamic] %s -> %p\n", name, method_getImplementation(methodList[i])); + methods[sel] = ptrauth_strip(method_getImplementation(methodList[i]), ptrauth_key_function_pointer); + if ( (currentClass == cls) || + ( (strcmp(name, ".cxx_construct") != 0) + && (strcmp(name, ".cxx_destruct") != 0))) { + dynamicCount++; + } + } + } + if (count > 0) { + free(methodList); + } + currentClass = class_getSuperclass(currentClass); + } + + // Check we have an equality between the two caches + + // Count the methods in the 
preopt cache + unsigned preoptCacheCount = 0; + unsigned capacity = objc_cache_preoptCapacity(&(cls->cache)); + const preopt_cache_entry_t *buckets = objc_cache_preoptCache(&(cls->cache))->entries; + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-of-sel-type" + const uint8_t *selOffsetsBase = (const uint8_t*)@selector(🤯); +#pragma clang diagnostic pop + for (unsigned i = 0 ; i < capacity ; i++) { + uint32_t selOffset = buckets[i].sel_offs; + if (selOffset != 0xFFFFFFFF) { + SEL sel = (SEL)(selOffsetsBase + selOffset); + IMP imp = (IMP)((uint8_t*)cls - buckets[i].imp_offs); + if (methods.find(sel) == methods.end()) { + fail("ERROR: %s: %s not found in dynamic method list\n", class_getName(cls), sel_getName(sel)); + return false; + } + IMP dynamicImp = methods.at(sel); + // printf("[static] %s -> %p\n", sel_getName(sel), imp); + if (imp != dynamicImp) { + fail("ERROR: %s: %s has different implementations %p vs %p in static and dynamic caches", class_getName(cls), sel_getName(sel), imp, dynamicImp); + return false; + } + preoptCacheCount++; + } + } + + if (preoptCacheCount != dynamicCount) { + testwarn("Methods in preopt cache:\n"); + + for (unsigned i = 0 ; i < capacity ; i++) { + uint32_t selOffset = buckets[i].sel_offs; + if (selOffset != 0xFFFFFFFF) { + SEL sel = (SEL)(selOffsetsBase + selOffset); + testwarn("%s\n", sel_getName(sel)); + } + } + + testwarn("Methods in dynamic cache:\n"); + + for (const auto & [sel, imp] : methods) { + testwarn("%s\n", sel_getName(sel)); + } + + fail("ERROR: %s's preoptimized cache is missing some methods\n", class_getName(cls)); + + return false; + } + + } else { + // printf("%s does NOT have a preopt cache\n", class_getName(cls)); + } + + return true; +} + +bool check_library(const char *path) { + std::set blacklistedClasses { + "PNPWizardScratchpadInkView", // Can only be +initialized on Pencil-capable devices + "CACDisplayManager", // rdar://64929282 (CACDisplayManager does layout in +initialize!) 
+ }; + + testprintf("Checking %s… ", path); + + __unused void *lib = dlopen(path, RTLD_NOW); + extern uint32_t _dyld_image_count(void) __OSX_AVAILABLE_STARTING(__MAC_10_1, __IPHONE_2_0); + unsigned outCount = 0; + + // Realize all classes first. + Class *allClasses = objc_copyClassList(&outCount); + if (allClasses != NULL) { + free(allClasses); + } + + allClasses = objc_copyClassesForImage(path, &outCount); + if (allClasses != NULL) { + unsigned classCount = 0; + unsigned cacheCount = 0; + + for (const Class * clsPtr = allClasses ; *clsPtr != nil ; clsPtr++) { + classCount++; + Class cls = *clsPtr; + + if (blacklistedClasses.find(class_getName(cls)) != blacklistedClasses.end()) { + continue; + } + + if (!check_class(cls, cacheCount)) { + return false; + } + + if (!class_isMetaClass(cls)) { + if (!check_class(object_getClass(cls), cacheCount)) { + return false; + } + } + } + testprintf("checked %d caches in %d classes\n", cacheCount, classCount); + free(allClasses); + } else { + testprintf("could not find %s or no class names inside\n", path); + } + + return true; +} + +size_t size_of_shared_cache_with_uuid(uuid_t uuid) { + DIR* dfd = opendir(IPHONE_DYLD_SHARED_CACHE_DIR); + if (!dfd) { + fail("Error: unable to open shared cache dir %s\n", + IPHONE_DYLD_SHARED_CACHE_DIR); + exit(1); + } + + uint64_t shared_cache_size = 0; + + struct dirent *dp; + while ((dp = readdir(dfd))) { + char full_filename[512]; + snprintf(full_filename, sizeof(full_filename), "%s%s", + IPHONE_DYLD_SHARED_CACHE_DIR, dp->d_name); + + struct stat stat_buf; + if (stat(full_filename, &stat_buf) != 0) + continue; + + if ((stat_buf.st_mode & S_IFMT) == S_IFDIR) + continue; + + int fd = open(full_filename, O_RDONLY); + if (fd < 0) { + fprintf(stderr, "Error: unable to open file %s\n", full_filename); + continue; + } + + struct dyld_cache_header header; + if (read(fd, &header, sizeof(header)) != sizeof(header)) { + fprintf(stderr, "Error: unable to read dyld shared cache header from %s\n", + 
full_filename); + close(fd); + continue; + } + + if (uuid_compare(header.uuid, uuid) == 0) { + shared_cache_size = stat_buf.st_size; + break; + } + } + + closedir(dfd); + + return shared_cache_size; +} + +int main (int argc, const char * argv[]) +{ + if (argc == 1) { + int err = 0; + dyld_process_info process_info = _dyld_process_info_create(mach_task_self(), 0, &err); + if (NULL == process_info) { + mach_error("_dyld_process_info_create", err); + fail("_dyld_process_info_create"); + return 2; + } + dyld_process_cache_info cache_info; + _dyld_process_info_get_cache(process_info, &cache_info); + + __block std::set dylibsSet; + size_t size = size_of_shared_cache_with_uuid(cache_info.cacheUUID); + dyld_shared_cache_iterate((void*)cache_info.cacheBaseAddress, (uint32_t)size, ^(const dyld_shared_cache_dylib_info* dylibInfo, __unused const dyld_shared_cache_segment_info* segInfo) { + if (dylibInfo->isAlias) return; + std::string path(dylibInfo->path); + dylibsSet.insert(path); + }); + std::vector dylibs(dylibsSet.begin(), dylibsSet.end()); + + dispatch_apply(dylibs.size(), DISPATCH_APPLY_AUTO, ^(size_t idx) { + validate_dylib_in_forked_process(argv[0], dylibs[idx].c_str()); + }); + } else { + const char *libraryName = argv[1]; + if (!check_library(libraryName)) { + fail("checking library %s\n", libraryName); + return 1; + } + } + + succeed(__FILE__); + return 0; +} diff --git a/test/protocolSmall.m b/test/protocolSmall.m new file mode 100644 index 0000000..a3f6fa6 --- /dev/null +++ b/test/protocolSmall.m @@ -0,0 +1,91 @@ +// TEST_CFLAGS -framework Foundation +// need Foundation to get NSObject compatibility additions for class Protocol +// because ARC calls [protocol retain] +/* +TEST_BUILD_OUTPUT +.*protocolSmall.m:\d+:\d+: warning: cannot find protocol definition for 'SmallProto' +.*protocolSmall.m:\d+:\d+: note: protocol 'SmallProto' has no definition +END +*/ + +#include "test.h" +#include "testroot.i" +#include + +struct MethodListOneEntry { + uint32_t 
entSizeAndFlags; + uint32_t count; + SEL name; + const char *types; + void *imp; +}; + +struct SmallProtoStructure { + Class isa; + const char *mangledName; + struct protocol_list_t *protocols; + void *instanceMethods; + void *classMethods; + void *optionalInstanceMethods; + void *optionalClassMethods; + void *instanceProperties; + uint32_t size; // sizeof(protocol_t) + uint32_t flags; +}; + +struct MethodListOneEntry SmallProtoMethodList = { + .entSizeAndFlags = 3 * sizeof(void *), + .count = 1, + .name = NULL, + .types = "v@:", + .imp = NULL, +}; + +struct SmallProtoStructure SmallProtoData + __asm__("__OBJC_PROTOCOL_$_SmallProto") + = { + .mangledName = "SmallProto", + .instanceMethods = &SmallProtoMethodList, + .size = sizeof(struct SmallProtoStructure), +}; + +void *SmallProtoListEntry + __attribute__((section("__DATA,__objc_protolist,coalesced,no_dead_strip"))) + = &SmallProtoData; + +@protocol SmallProto; +@protocol NormalProto +- (void)protoMethod; +@end + +@interface C: TestRoot @end +@implementation C +- (void)protoMethod {} +@end + +int main() +{ + // Fix up the method list selector by hand, getting the compiler to generate a + // proper selref as a compile-time constant is a pain. 
+ SmallProtoMethodList.name = @selector(protoMethod); + unsigned protoCount; + + Protocol * __unsafe_unretained *protos = class_copyProtocolList([C class], &protoCount); + for (unsigned i = 0; i < protoCount; i++) { + testprintf("Checking index %u protocol %p\n", i, protos[i]); + const char *name = protocol_getName(protos[i]); + testprintf("Name is %s\n", name); + testassert(strcmp(name, "SmallProto") == 0 || strcmp(name, "NormalProto") == 0); + + objc_property_t *classProperties = protocol_copyPropertyList2(protos[i], NULL, YES, NO); + testassert(classProperties == NULL); + + struct objc_method_description desc = protocol_getMethodDescription(protos[i], @selector(protoMethod), YES, YES); + testprintf("Protocol protoMethod name is %s types are %s\n", desc.name, desc.types); + testassert(desc.name == @selector(protoMethod)); + testassert(desc.types[0] == 'v'); + } + free(protos); + + succeed(__FILE__); +} diff --git a/test/readClassPair.m b/test/readClassPair.m index 80313b2..ebc8587 100644 --- a/test/readClassPair.m +++ b/test/readClassPair.m @@ -48,10 +48,16 @@ int main() // Read a non-root class. testassert(!objc_getClass("Sub")); - extern intptr_t OBJC_CLASS_$_Sub[OBJC_MAX_CLASS_SIZE/sizeof(void*)]; + // Clang assumes too much alignment on this by default (rdar://problem/60881608), + // so tell it that it's only as aligned as an intptr_t. + extern _Alignas(intptr_t) intptr_t OBJC_CLASS_$_Sub[OBJC_MAX_CLASS_SIZE/sizeof(void*)]; // Make a duplicate of class Sub for use later. intptr_t Sub2_buf[OBJC_MAX_CLASS_SIZE/sizeof(void*)]; memcpy(Sub2_buf, &OBJC_CLASS_$_Sub, sizeof(Sub2_buf)); + // Re-sign the isa and super pointers in the new location. 
+ ((Class __ptrauth_objc_isa_pointer *)(void *)Sub2_buf)[0] = ((Class __ptrauth_objc_isa_pointer *)(void *)&OBJC_CLASS_$_Sub)[0]; + ((Class __ptrauth_objc_super_pointer *)(void *)Sub2_buf)[1] = ((Class __ptrauth_objc_super_pointer *)(void *)&OBJC_CLASS_$_Sub)[1]; + Class Sub = objc_readClassPair((__bridge Class)(void*)&OBJC_CLASS_$_Sub, &ii); testassert(Sub); diff --git a/test/rr-sidetable.m b/test/rr-sidetable.m index daa4090..ac3606a 100644 --- a/test/rr-sidetable.m +++ b/test/rr-sidetable.m @@ -9,7 +9,7 @@ #include "test.h" #import -#define OBJECTS 1 +#define OBJECTS 10 #define LOOPS 256 #define THREADS 16 #if __x86_64__ diff --git a/test/runtime.m b/test/runtime.m index 50bd68c..4e22606 100644 --- a/test/runtime.m +++ b/test/runtime.m @@ -221,6 +221,13 @@ int main() testassert(strcmp(class_getName([SwiftV1Class3 class]), class_getName(object_getClass([SwiftV1Class3 class]))) == 0); testassert(strcmp(class_getName([SwiftV1Class4 class]), class_getName(object_getClass([SwiftV1Class4 class]))) == 0); + testassert(!_class_isSwift([TestRoot class])); + testassert(!_class_isSwift([Sub class])); + testassert(_class_isSwift([SwiftV1Class class])); + testassert(_class_isSwift([SwiftV1Class2 class])); + testassert(_class_isSwift([SwiftV1Class3 class])); + testassert(_class_isSwift([SwiftV1Class4 class])); + succeed(__FILE__); } diff --git a/test/setAssociatedObjectHook.m b/test/setAssociatedObjectHook.m index e244d5c..97f78c1 100644 --- a/test/setAssociatedObjectHook.m +++ b/test/setAssociatedObjectHook.m @@ -1,47 +1,46 @@ -// TEST_CONFIG +/* + TEST_CONFIG MEM=mrc + TEST_ENV OBJC_DISABLE_NONPOINTER_ISA=YES +*/ #include "test.h" #include "testroot.i" -id sawObject; -const void *sawKey; -id sawValue; -objc_AssociationPolicy sawPolicy; +bool hasAssociations = false; -objc_hook_setAssociatedObject originalSetAssociatedObject; +@interface TestRoot (AssocHooks) +@end -void hook(id _Nonnull object, const void * _Nonnull key, id _Nullable value, objc_AssociationPolicy policy) { 
- sawObject = object; - sawKey = key; - sawValue = value; - sawPolicy = policy; - originalSetAssociatedObject(object, key, value, policy); +@implementation TestRoot (AssocHooks) + +- (void)_noteAssociatedObjects { + hasAssociations = true; } +// -_noteAssociatedObjects is currently limited to raw-isa custom-rr to avoid overhead +- (void) release { +} + +@end + int main() { id obj = [TestRoot new]; id value = [TestRoot new]; const void *key = "key"; objc_setAssociatedObject(obj, key, value, OBJC_ASSOCIATION_RETAIN); - testassert(sawObject == nil); - testassert(sawKey == nil); - testassert(sawValue == nil); - testassert(sawPolicy == 0); + testassert(hasAssociations == true); id out = objc_getAssociatedObject(obj, key); testassert(out == value); - objc_setHook_setAssociatedObject(hook, &originalSetAssociatedObject); - + hasAssociations = false; key = "key2"; objc_setAssociatedObject(obj, key, value, OBJC_ASSOCIATION_RETAIN); - testassert(sawObject == obj); - testassert(sawKey == key); - testassert(sawValue == value); - testassert(sawPolicy == OBJC_ASSOCIATION_RETAIN); + testassert(hasAssociations == false); //only called once + out = objc_getAssociatedObject(obj, key); testassert(out == value); succeed(__FILE__); -} \ No newline at end of file +} diff --git a/test/supported-inline-refcnt.m b/test/supported-inline-refcnt.m new file mode 100644 index 0000000..bd32ba9 --- /dev/null +++ b/test/supported-inline-refcnt.m @@ -0,0 +1,85 @@ +// TEST_CONFIG MEM=mrc +// TEST_CFLAGS -framework CoreFoundation -Weverything + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Weverything" +#include "test.h" +#pragma clang diagnostic pop +#include +#include +#include + + +// Some warnings just aren't feasible to work around. We'll disable them instead. 
+#pragma clang diagnostic ignored "-Watomic-implicit-seq-cst" +#pragma clang diagnostic ignored "-Wdirect-ivar-access" +#pragma clang diagnostic ignored "-Wold-style-cast" + +static int deallocCount; +@interface Refcnt: NSObject @end +@implementation Refcnt { + int _rc; +} + +_OBJC_SUPPORTED_INLINE_REFCNT(_rc) + +- (void)dealloc { + deallocCount++; + [super dealloc]; +} + +@end + +@interface MainRefcnt: NSObject @end +@implementation MainRefcnt { + int _rc; +} + +_OBJC_SUPPORTED_INLINE_REFCNT_WITH_DEALLOC2MAIN(_rc) + +- (void)dealloc { + testassert(pthread_main_np()); + deallocCount++; + [super dealloc]; +} + +@end + +int main() +{ + Refcnt *obj = [Refcnt new]; + [obj retain]; + [obj retain]; + [obj retain]; + [obj release]; + [obj release]; + [obj release]; + [obj release]; + testassert(deallocCount == 1); + + MainRefcnt *obj2 = [MainRefcnt new]; + [obj2 retain]; + [obj2 retain]; + [obj2 retain]; + + dispatch_group_t group = dispatch_group_create(); + dispatch_group_async(group, dispatch_get_global_queue(0, 0), ^{ + [obj2 release]; + }); + dispatch_group_async(group, dispatch_get_global_queue(0, 0), ^{ + [obj2 release]; + }); + dispatch_group_async(group, dispatch_get_global_queue(0, 0), ^{ + [obj2 release]; + }); + dispatch_group_async(group, dispatch_get_global_queue(0, 0), ^{ + [obj2 release]; + }); + + dispatch_group_notify(group, dispatch_get_main_queue(), ^{ + testassert(deallocCount == 2); + succeed(__FILE__); + }); + + CFRunLoopRun(); +} diff --git a/test/swift-class-def.m b/test/swift-class-def.m index 6bc2d05..9ca2f16 100644 --- a/test/swift-class-def.m +++ b/test/swift-class-def.m @@ -15,9 +15,15 @@ #if __has_feature(ptrauth_calls) # define SIGNED_METHOD_LIST_IMP "@AUTH(ia,0,addr) " # define SIGNED_STUB_INITIALIZER "@AUTH(ia,0xc671,addr) " +# define SIGNED_METHOD_LIST "@AUTH(da,0xC310,addr) " +# define SIGNED_ISA "@AUTH(da, 0x6AE1, addr) " +# define SIGNED_SUPER "@AUTH(da, 0xB5AB, addr) " #else # define SIGNED_METHOD_LIST_IMP # define 
SIGNED_STUB_INITIALIZER +# define SIGNED_METHOD_LIST +# define SIGNED_ISA +# define SIGNED_SUPER #endif #define str(x) #x @@ -41,8 +47,8 @@ asm( \ ".section __DATA,__objc_data \n" \ ".align 3 \n" \ "_OBJC_CLASS_$_" #name ": \n" \ - PTR "_OBJC_METACLASS_$_" #name "\n" \ - PTR "_OBJC_CLASS_$_" #superclass "\n" \ + PTR "_OBJC_METACLASS_$_" #name SIGNED_ISA "\n" \ + PTR "_OBJC_CLASS_$_" #superclass SIGNED_SUPER "\n" \ PTR "__objc_empty_cache \n" \ PTR "0 \n" \ PTR "L_" #name "_ro + 2 \n" \ @@ -82,8 +88,8 @@ asm( \ PTR "0 \n" \ \ "_OBJC_METACLASS_$_" #name ": \n" \ - PTR "_OBJC_METACLASS_$_" #superclass "\n" \ - PTR "_OBJC_METACLASS_$_" #superclass "\n" \ + PTR "_OBJC_METACLASS_$_" #superclass SIGNED_ISA "\n" \ + PTR "_OBJC_METACLASS_$_" #superclass SIGNED_SUPER "\n" \ PTR "__objc_empty_cache \n" \ PTR "0 \n" \ PTR "L_" #name "_meta_ro \n" \ @@ -123,7 +129,7 @@ asm( \ ONLY_LP64(".long 0 \n") \ PTR "0 \n" \ PTR "L_" #name "_name \n" \ - PTR "L_" #name "_methods \n" \ + PTR "L_" #name "_methods" SIGNED_METHOD_LIST "\n" \ PTR "0 \n" \ PTR "L_" #name "_ivars \n" \ PTR "0 \n" \ @@ -137,7 +143,7 @@ asm( \ ONLY_LP64(".long 0 \n") \ PTR "0 \n" \ PTR "L_" #name "_name \n" \ - PTR "L_" #name "_meta_methods \n" \ + PTR "L_" #name "_meta_methods" SIGNED_METHOD_LIST "\n" \ PTR "0 \n" \ PTR "0 \n" \ PTR "0 \n" \ diff --git a/test/swiftMetadataInitializerRealloc.m b/test/swiftMetadataInitializerRealloc.m index c50d1dc..9e72211 100644 --- a/test/swiftMetadataInitializerRealloc.m +++ b/test/swiftMetadataInitializerRealloc.m @@ -65,6 +65,9 @@ Class initSub(Class cls, void *arg) // Example: rdar://problem/50707074 Class HeapSwiftSub = (Class)malloc(OBJC_MAX_CLASS_SIZE); memcpy(HeapSwiftSub, RawRealSwiftSub, OBJC_MAX_CLASS_SIZE); + // Re-sign the isa and super pointers in the new location. 
+ ((Class __ptrauth_objc_isa_pointer *)(void *)HeapSwiftSub)[0] = ((Class __ptrauth_objc_isa_pointer *)(void *)RawRealSwiftSub)[0]; + ((Class __ptrauth_objc_super_pointer *)(void *)HeapSwiftSub)[1] = ((Class __ptrauth_objc_super_pointer *)(void *)RawRealSwiftSub)[1]; testprintf("initSub beginning _objc_realizeClassFromSwift\n"); _objc_realizeClassFromSwift(HeapSwiftSub, cls); diff --git a/test/taggedPointers.m b/test/taggedPointers.m index 76f1617..490838b 100644 --- a/test/taggedPointers.m +++ b/test/taggedPointers.m @@ -295,6 +295,22 @@ void testGenericTaggedPointer(objc_tag_index_t tag, Class cls) RELEASE_VAR(w); } +#if OBJC_SPLIT_TAGGED_POINTERS +void testConstantTaggedPointerRoundTrip(void *ptr) +{ + uintptr_t tagged = (uintptr_t)ptr | objc_debug_constant_cfstring_tag_bits; + void *untagged = _objc_getTaggedPointerRawPointerValue((void *)tagged); + testassert(ptr == untagged); +} + +void testConstantTaggedPointers(void) +{ + testConstantTaggedPointerRoundTrip(0); + testConstantTaggedPointerRoundTrip((void *)sizeof(void *)); + testConstantTaggedPointerRoundTrip((void *)(MACH_VM_MAX_ADDRESS - sizeof(void *))); +} +#endif + int main() { testassert(objc_debug_taggedpointer_mask != 0); @@ -336,6 +352,10 @@ int main() objc_getClass("TaggedNSObjectSubclass")); testGenericTaggedPointer(OBJC_TAG_NSManagedObjectID, objc_getClass("TaggedNSObjectSubclass")); + +#if OBJC_SPLIT_TAGGED_POINTERS + testConstantTaggedPointers(); +#endif } POP_POOL; succeed(__FILE__); diff --git a/test/taggedPointersTagObfuscationDisabled.m b/test/taggedPointersTagObfuscationDisabled.m index a3aad8b..e9fee7d 100644 --- a/test/taggedPointersTagObfuscationDisabled.m +++ b/test/taggedPointersTagObfuscationDisabled.m @@ -14,7 +14,13 @@ int main() int main() { - testassert(_objc_getTaggedPointerTag((void *)1) == 0); +#if OBJC_SPLIT_TAGGED_POINTERS + void *obj = (void *)0; +#else + void *obj = (void *)1; +#endif + + testassert(_objc_getTaggedPointerTag(obj) == 0); succeed(__FILE__); } diff --git 
a/test/test-defines.h b/test/test-defines.h new file mode 100644 index 0000000..0a74274 --- /dev/null +++ b/test/test-defines.h @@ -0,0 +1 @@ +#define TEST_OVERRIDES_NEW 1 diff --git a/test/test.h b/test/test.h index f4332a1..33f223a 100644 --- a/test/test.h +++ b/test/test.h @@ -15,7 +15,8 @@ #include #if __cplusplus #include -using namespace std; +using std::atomic_int; +using std::memory_order_relaxed; #else #include #endif @@ -83,6 +84,40 @@ static inline void fail(const char *msg, ...) #define __testassert(cond, file, line) \ (fail("failed assertion '%s' at %s:%u", cond, __FILE__, __LINE__)) +static inline char *hexstring(uint8_t *data, size_t size) +{ + char *str; + switch (size) { + case sizeof(unsigned long long): + asprintf(&str, "%016llx", *(unsigned long long *)data); + break; + case sizeof(unsigned int): + asprintf(&str, "%08x", *(unsigned int*)data); + break; + case sizeof(uint16_t): + asprintf(&str, "%04x", *(uint16_t *)data); + break; + default: + str = (char *)malloc(size * 2 + 1); + for (size_t i = 0; i < size; i++) { + sprintf(str + i, "%02x", data[i]); + } + } + return str; +} + +static inline void failnotequal(uint8_t *lhs, size_t lhsSize, uint8_t *rhs, size_t rhsSize, const char *lhsStr, const char *rhsStr, const char *file, unsigned line) +{ + fprintf(stderr, "BAD: failed assertion '%s != %s' (0x%s != 0x%s) at %s:%u\n", lhsStr, rhsStr, hexstring(lhs, lhsSize), hexstring(rhs, rhsSize), file, line); + exit(1); +} + +#define testassertequal(lhs, rhs) do {\ + __typeof__(lhs) __lhs = lhs; \ + __typeof__(rhs) __rhs = rhs; \ + if ((lhs) != (rhs)) failnotequal((uint8_t *)&__lhs, sizeof(__lhs), (uint8_t *)&__rhs, sizeof(__rhs), #lhs, #rhs, __FILE__, __LINE__); \ +} while(0) + /* time-sensitive assertion, disabled under valgrind */ #define timecheck(name, time, fast, slow) \ if (getenv("VALGRIND") && 0 != strcmp(getenv("VALGRIND"), "NO")) { \ @@ -143,6 +178,19 @@ static inline void testwarn(const char *msg, ...) 
static inline void testnoop() { } +// Are we running in dyld3 mode? +// Note: checks by looking for the DYLD_USE_CLOSURES environment variable. +// This is is always set by our test script, but this won't give the right +// answer when being run manually unless that variable is set. +static inline bool testdyld3(void) { + static int dyld = 0; + if (dyld == 0) { + const char *useClosures = getenv("DYLD_USE_CLOSURES"); + dyld = useClosures && useClosures[0] == '1' ? 3 : 2; + } + return dyld == 3; +} + // Prevent deprecation warnings from some runtime functions. static inline void test_objc_flush_caches(Class cls) @@ -195,17 +243,20 @@ static inline void testonthread(__unsafe_unretained testblock_t code) `#define TEST_CALLS_OPERATOR_NEW` before including test.h. */ #if __cplusplus && !defined(TEST_CALLS_OPERATOR_NEW) +#if !defined(TEST_OVERRIDES_NEW) +#define TEST_OVERRIDES_NEW 1 +#endif #pragma clang diagnostic push #pragma clang diagnostic ignored "-Winline-new-delete" #import -inline void* operator new(std::size_t) throw (std::bad_alloc) { fail("called global operator new"); } -inline void* operator new[](std::size_t) throw (std::bad_alloc) { fail("called global operator new[]"); } -inline void* operator new(std::size_t, const std::nothrow_t&) throw() { fail("called global operator new(nothrow)"); } -inline void* operator new[](std::size_t, const std::nothrow_t&) throw() { fail("called global operator new[](nothrow)"); } -inline void operator delete(void*) throw() { fail("called global operator delete"); } -inline void operator delete[](void*) throw() { fail("called global operator delete[]"); } -inline void operator delete(void*, const std::nothrow_t&) throw() { fail("called global operator delete(nothrow)"); } -inline void operator delete[](void*, const std::nothrow_t&) throw() { fail("called global operator delete[](nothrow)"); } +inline void* operator new(std::size_t) { fail("called global operator new"); } +inline void* operator new[](std::size_t) { 
fail("called global operator new[]"); } +inline void* operator new(std::size_t, const std::nothrow_t&) noexcept(true) { fail("called global operator new(nothrow)"); } +inline void* operator new[](std::size_t, const std::nothrow_t&) noexcept(true) { fail("called global operator new[](nothrow)"); } +inline void operator delete(void*) noexcept(true) { fail("called global operator delete"); } +inline void operator delete[](void*) noexcept(true) { fail("called global operator delete[]"); } +inline void operator delete(void*, const std::nothrow_t&) noexcept(true) { fail("called global operator delete(nothrow)"); } +inline void operator delete[](void*, const std::nothrow_t&) noexcept(true) { fail("called global operator delete[](nothrow)"); } #pragma clang diagnostic pop #endif @@ -288,7 +339,7 @@ static inline void leak_mark(void) leak_dump_heap("HEAP AT leak_check"); \ } \ inuse = leak_inuse(); \ - if (inuse > _leak_start + n) { \ + if (inuse > _leak_start + (n)) { \ fprintf(stderr, "BAD: %zu bytes leaked at %s:%u " \ "(try LEAK_HEAP and HANG_ON_LEAK to debug)\n", \ inuse - _leak_start, __FILE__, __LINE__); \ diff --git a/test/test.pl b/test/test.pl old mode 100644 new mode 100755 index 0d0886b..88221aa --- a/test/test.pl +++ b/test/test.pl @@ -6,6 +6,16 @@ use strict; use File::Basename; +use Config; +my $supportsParallelBuilds = $Config{useithreads}; + +if ($supportsParallelBuilds) { + require threads; + import threads; + require Thread::Queue; + import Thread::Queue; +} + # We use encode_json() to write BATS plist files. # JSON::PP does not exist on iOS devices, but we need not write plists there. # So we simply load JSON:PP if it exists. @@ -13,6 +23,13 @@ if (eval { require JSON::PP; 1; }) { JSON::PP->import(); } +# iOS also doesn't have Text::Glob. We don't need it there. 
+my $has_match_glob = 0; +if (eval { require Text::Glob; 1; }) { + Text::Glob->import(); + $has_match_glob = 1; +} + chdir dirname $0; chomp (my $DIR = `pwd`); @@ -31,6 +48,8 @@ options: ARCH= OS=[sdk version][-[-]] ROOT=/path/to/project.roots/ + HOST= + DEVICE= CC= @@ -42,6 +61,10 @@ options: RUN=0|1 (run the tests?) VERBOSE=0|1|2 (0=quieter 1=print commands executed 2=full test output) BATS=0|1 (build for and/or run in BATS?) + BUILD_SHARED_CACHE=0|1 (build a dyld shared cache with the root and test against that) + DYLD=2|3 (test in dyld 2 or dyld 3 mode) + PARALLELBUILDS=N (number of parallel builds to run simultaneously) + SHAREDCACHEDIR=/path/to/custom/shared/cache/directory examples: @@ -106,6 +129,11 @@ my $BATS; my $HOST; my $PORT; +my $DEVICE; + +my $PARALLELBUILDS; + +my $SHAREDCACHEDIR; my @TESTLIBNAMES = ("libobjc.A.dylib", "libobjc-trampolines.dylib"); my $TESTLIBDIR = "/usr/lib"; @@ -221,14 +249,18 @@ my %languages_for_extension = ( # Run some newline-separated commands like `make` would, stopping if any fail # run("cmd1 \n cmd2 \n cmd3") sub make { + my ($cmdstr, $cwd) = @_; my $output = ""; - my @cmds = split("\n", $_[0]); + my @cmds = split("\n", $cmdstr); die if scalar(@cmds) == 0; $? 
= 0; foreach my $cmd (@cmds) { chomp $cmd; next if $cmd =~ /^\s*$/; $cmd .= " 2>&1"; + if (defined $cwd) { + $cmd = "cd $cwd; $cmd"; + } print "$cmd\n" if $VERBOSE; $output .= `$cmd`; last if $?; @@ -245,7 +277,7 @@ sub chdir_verbose { sub rm_rf_verbose { my $dir = shift || die; - print "mkdir -p $dir\n" if $VERBOSE; + print "rm -rf $dir\n" if $VERBOSE; `rm -rf '$dir'`; die "couldn't rm -rf $dir" if $?; } @@ -732,6 +764,7 @@ sub gather_simple { # TEST_BUILD build instructions # TEST_BUILD_OUTPUT expected build stdout/stderr # TEST_RUN_OUTPUT expected run stdout/stderr + # TEST_ENTITLEMENTS path to entitlements file open(my $in, "< $file") || die; my $contents = join "", <$in>; @@ -741,11 +774,15 @@ sub gather_simple { my ($conditionstring) = ($contents =~ /\bTEST_CONFIG\b(.*)$/m); my ($envstring) = ($contents =~ /\bTEST_ENV\b(.*)$/m); my ($cflags) = ($contents =~ /\bTEST_CFLAGS\b(.*)$/m); + my ($entitlements) = ($contents =~ /\bTEST_ENTITLEMENTS\b(.*)$/m); + $entitlements =~ s/^\s+|\s+$//g; my ($buildcmd) = extract_multiline("TEST_BUILD", $contents, $name); my ($builderror) = extract_multiple_multiline("TEST_BUILD_OUTPUT", $contents, $name); my ($runerror) = extract_multiple_multiline("TEST_RUN_OUTPUT", $contents, $name); - return 0 if !$test_h && !$disabled && !$crashes && !defined($conditionstring) && !defined($envstring) && !defined($cflags) && !defined($buildcmd) && !defined($builderror) && !defined($runerror); + return 0 if !$test_h && !$disabled && !$crashes && !defined($conditionstring) + && !defined($envstring) && !defined($cflags) && !defined($buildcmd) + && !defined($builderror) && !defined($runerror) && !defined($entitlements); if ($disabled) { colorprint $yellow, "SKIP: $name (disabled by $disabled)"; @@ -811,6 +848,7 @@ sub gather_simple { TEST_RUN => $run, DSTDIR => "$C{DSTDIR}/$name.build", OBJDIR => "$C{OBJDIR}/$name.build", + ENTITLEMENTS => $entitlements, }; return 1; @@ -856,22 +894,34 @@ sub build_simple { my $name = shift; my %T = 
%{$C{"TEST_$name"}}; - mkdir_verbose $T{DSTDIR}; - chdir_verbose $T{DSTDIR}; + my $dstdir = $T{DSTDIR}; + if (-e "$dstdir/build-succeeded") { + # We delete the whole test directory before building (if it existed), + # so if this file exists now, that means another configuration already + # did an equivalent build. + print "note: $name is already built at $dstdir, skipping the build\n" if $VERBOSE; + return 1; + } + + mkdir_verbose $dstdir; # we don't mkdir $T{OBJDIR} because most tests don't use it my $ext = $ALL_TESTS{$name}; my $file = "$DIR/$name.$ext"; if ($T{TEST_CRASHES}) { - `echo '$crashcatch' > crashcatch.c`; - make("$C{COMPILE_C} -dynamiclib -o libcrashcatch.dylib -x c crashcatch.c"); - die "$?" if $?; + `echo '$crashcatch' > $dstdir/crashcatch.c`; + my $output = make("$C{COMPILE_C} -dynamiclib -o libcrashcatch.dylib -x c crashcatch.c", $dstdir); + if ($?) { + colorprint $red, "FAIL: building crashcatch.c"; + colorprefix $red, $output; + return 0; + } } my $cmd = $T{TEST_BUILD} ? eval "return \"$T{TEST_BUILD}\"" : "$C{COMPILE} $T{TEST_CFLAGS} $file -o $name.exe"; - my $output = make($cmd); + my $output = make($cmd, $dstdir); # ignore out-of-date text-based stubs (caused by ditto into SDK) $output =~ s/ld: warning: text-based stub file.*\n//g; @@ -884,6 +934,7 @@ sub build_simple { $output =~ s/^warning: callee: [^\n]+\n//g; # rdar://38710948 $output =~ s/ld: warning: ignoring file [^\n]*libclang_rt\.bridgeos\.a[^\n]*\n//g; + $output =~ s/ld: warning: building for iOS Simulator, but[^\n]*\n//g; # ignore compiler logging of CCC_OVERRIDE_OPTIONS effects if (defined $ENV{CCC_OVERRIDE_OPTIONS}) { $output =~ s/### (CCC_OVERRIDE_OPTIONS:|Adding argument|Deleting argument|Replacing) [^\n]*\n//g; @@ -926,23 +977,36 @@ sub build_simple { } if ($ok) { - foreach my $file (glob("*.exe *.dylib *.bundle")) { + foreach my $file (glob("$dstdir/*.exe $dstdir/*.dylib $dstdir/*.bundle")) { if (!$BATS) { # not for BATS to save space and build time # fixme use SYMROOT? 
- make("xcrun dsymutil $file"); + make("xcrun dsymutil $file", $dstdir); } if ($C{OS} eq "macosx" || $C{OS} =~ /simulator/) { # setting any entitlements disables dyld environment variables } else { # get-task-allow entitlement is required # to enable dyld environment variables - make("xcrun codesign -s - --entitlements $DIR/get_task_allow_entitlement.plist $file"); - die "$?" if $?; + if (!$T{ENTITLEMENTS}) { + $T{ENTITLEMENTS} = "get_task_allow_entitlement.plist"; + } + my $output = make("xcrun codesign -s - --entitlements $DIR/$T{ENTITLEMENTS} $file", $dstdir); + if ($?) { + colorprint $red, "FAIL: codesign $file"; + colorprefix $red, $output; + return 0; + } } } } + # Mark the build as successful so other configs with the same build + # requirements can skip buildiing. + if ($ok) { + make("touch build-succeeded", $dstdir); + } + return $ok; } @@ -966,6 +1030,20 @@ sub run_simple { $env .= " OBJC_DEBUG_DONT_CRASH=YES"; } + if ($C{DYLD} eq "2") { + $env .= " DYLD_USE_CLOSURES=0"; + } + elsif ($C{DYLD} eq "3") { + $env .= " DYLD_USE_CLOSURES=1"; + } + else { + die "unknown DYLD setting $C{DYLD}"; + } + + if ($SHAREDCACHEDIR) { + $env .= " DYLD_SHARED_REGION=private DYLD_SHARED_CACHE_DIR=$SHAREDCACHEDIR"; + } + my $output; if ($C{ARCH} =~ /^arm/ && `uname -p` !~ /^arm/) { @@ -981,23 +1059,12 @@ sub run_simple { $env .= " DYLD_INSERT_LIBRARIES=$remotedir/libcrashcatch.dylib"; } - my $cmd = "ssh -p $PORT $HOST 'cd $remotedir && env $env ./$name.exe'"; + my $cmd = "ssh $PORT $HOST 'cd $remotedir && env $env ./$name.exe'"; $output = make("$cmd"); } elsif ($C{OS} =~ /simulator/) { # run locally in a simulator - # fixme selection of simulated OS version - my $simdevice; - if ($C{OS} =~ /iphonesimulator/) { - $simdevice = 'iPhone X'; - } elsif ($C{OS} =~ /watchsimulator/) { - $simdevice = 'Apple Watch Series 4 - 40mm'; - } elsif ($C{OS} =~ /tvsimulator/) { - $simdevice = 'Apple TV 1080p'; - } else { - die "unknown simulator $C{OS}\n"; - } - my $sim = "xcrun -sdk 
iphonesimulator simctl spawn '$simdevice'"; + my $sim = "xcrun -sdk iphonesimulator simctl spawn '$DEVICE'"; # Add test dir and libobjc's dir to DYLD_LIBRARY_PATH. # Insert libcrashcatch.dylib if necessary. $env .= " DYLD_LIBRARY_PATH=$testdir"; @@ -1060,6 +1127,26 @@ sub dirContainsAllTestLibs { return 1; } +sub findIncludeDir { + my ($root, $includePath) = @_; + + foreach my $candidate ("$root/../SDKContentRoot/$includePath", "$root/$includePath") { + my $found = -e $candidate; + my $foundstr = ($found ? "found" : "didn't find"); + print "note: $foundstr $includePath at $candidate\n" if $VERBOSE; + return $candidate if $found; + } + + die "Unable to find $includePath in $root.\n"; +} + +sub buildSharedCache { + my $Cref = shift; + my %C = %$Cref; + + make("update_dyld_shared_cache -verbose -cache_dir $BUILDDIR -overlay $C{TESTLIBDIR}/../.."); +} + sub make_one_config { my $configref = shift; my $root = shift; @@ -1091,11 +1178,11 @@ sub make_one_config { # set the config name now, after massaging the language and OS versions, # but before adding other settings - my $configname = config_name(%C); - die if ($configname =~ /'/); - die if ($configname =~ / /); - ($C{NAME} = $configname) =~ s/~/ /g; - (my $configdir = $configname) =~ s#/##g; + my $configdirname = config_dir_name(%C); + die if ($configdirname =~ /'/); + die if ($configdirname =~ / /); + ($C{NAME} = $configdirname) =~ s/~/ /g; + (my $configdir = $configdirname) =~ s#/##g; $C{DSTDIR} = "$DSTROOT$BUILDDIR/$configdir"; $C{OBJDIR} = "$OBJROOT$BUILDDIR/$configdir"; @@ -1315,9 +1402,8 @@ sub make_one_config { my $library_path = $C{TESTLIBDIR}; $cflags .= " -L$library_path"; - # fixme Root vs SDKContentRoot - $C{TESTINCLUDEDIR} = "$root/../SDKContentRoot/usr/include"; - $C{TESTLOCALINCLUDEDIR} = "$root/../SDKContentRoot/usr/local/include"; + $C{TESTINCLUDEDIR} = findIncludeDir($root, "usr/include"); + $C{TESTLOCALINCLUDEDIR} = findIncludeDir($root, "usr/local/include"); $cflags .= " -isystem 
'$C{TESTINCLUDEDIR}'"; $cflags .= " -isystem '$C{TESTLOCALINCLUDEDIR}'"; } @@ -1358,9 +1444,9 @@ sub make_one_config { $C{XCRUN} = "env LANG=C /usr/bin/xcrun -toolchain '$C{TOOLCHAIN}'"; $C{COMPILE_C} = "$C{XCRUN} '$C{CC}' $cflags -x c -std=gnu99"; - $C{COMPILE_CXX} = "$C{XCRUN} '$C{CXX}' $cflags -x c++"; + $C{COMPILE_CXX} = "$C{XCRUN} '$C{CXX}' $cflags -x c++ -std=gnu++17"; $C{COMPILE_M} = "$C{XCRUN} '$C{CC}' $cflags $objcflags -x objective-c -std=gnu99"; - $C{COMPILE_MM} = "$C{XCRUN} '$C{CXX}' $cflags $objcflags -x objective-c++"; + $C{COMPILE_MM} = "$C{XCRUN} '$C{CXX}' $cflags $objcflags -x objective-c++ -std=gnu++17"; $C{COMPILE_SWIFT} = "$C{XCRUN} '$C{SWIFT}' $swiftflags"; $C{COMPILE} = $C{COMPILE_C} if $C{LANGUAGE} eq "c"; @@ -1437,10 +1523,13 @@ sub make_configs { return @newresults; } -sub config_name { +sub config_dir_name { my %config = @_; my $name = ""; for my $key (sort keys %config) { + # Exclude settings that only influence the run, not the build. + next if $key eq "DYLD" || $key eq "GUARDMALLOC"; + $name .= '~' if $name ne ""; $name .= "$key=$config{$key}"; } @@ -1450,7 +1539,7 @@ sub config_name { sub rsync_ios { my ($src, $timeout) = @_; for (my $i = 0; $i < 10; $i++) { - make("$DIR/timeout.pl $timeout rsync -e 'ssh -p $PORT' -av $src $HOST:/$REMOTEBASE/"); + make("$DIR/timeout.pl $timeout rsync -e 'ssh $PORT' -av $src $HOST:/$REMOTEBASE/"); return if $? 
== 0; colorprint $yellow, "WARN: RETRY\n" if $VERBOSE; } @@ -1475,8 +1564,15 @@ sub build_and_run_one_config { if ($ALL_TESTS{$test}) { gather_simple(\%C, $test) || next; # not pass, not fail push @gathertests, $test; - } else { - die "No test named '$test'\n"; + } elsif ($has_match_glob) { + my @matched = Text::Glob::match_glob($test, (keys %ALL_TESTS)); + if (not @matched) { + die "No test matched '$test'\n"; + } + foreach my $match (@matched) { + gather_simple(\%C, $match) || next; # not pass, not fail + push @gathertests, $match; + } } } @@ -1484,7 +1580,56 @@ sub build_and_run_one_config { if (!$BUILD) { @builttests = @gathertests; $testcount = scalar(@gathertests); + } elsif ($PARALLELBUILDS > 1 && $supportsParallelBuilds) { + my $workQueue = Thread::Queue->new(); + my $resultsQueue = Thread::Queue->new(); + my @threads = map { + threads->create(sub { + while (defined(my $test = $workQueue->dequeue())) { + local *STDOUT; + local *STDERR; + my $output; + open STDOUT, '>>', \$output; + open STDERR, '>>', \$output; + + my $success = build_simple(\%C, $test); + $resultsQueue->enqueue({ test => $test, success => $success, output => $output }); + } + }); + } (1 .. $PARALLELBUILDS); + + foreach my $test (@gathertests) { + if ($VERBOSE) { + print "\nBUILD $test\n"; + } + if ($ALL_TESTS{$test}) { + $testcount++; + $workQueue->enqueue($test); + } else { + die "No test named '$test'\n"; + } + } + $workQueue->end(); + foreach (@gathertests) { + my $result = $resultsQueue->dequeue(); + my $test = $result->{test}; + my $success = $result->{success}; + my $output = $result->{output}; + + print $output; + if ($success) { + push @builttests, $test; + } else { + $failcount++; + } + } + foreach my $thread (@threads) { + $thread->join(); + } } else { + if ($PARALLELBUILDS > 1) { + print "WARNING: requested parallel builds, but this perl interpreter does not support threads. 
Falling back to sequential builds.\n"; + } foreach my $test (@gathertests) { if ($VERBOSE) { print "\nBUILD $test\n"; @@ -1507,7 +1652,7 @@ sub build_and_run_one_config { # nothing to do } else { - if ($C{ARCH} =~ /^arm/ && `uname -p` !~ /^arm/) { + if ($HOST && $C{ARCH} =~ /^arm/ && `uname -p` !~ /^arm/) { # upload timeout - longer for slow watch devices my $timeout = ($C{OS} =~ /watch/) ? 120 : 20; @@ -1634,10 +1779,22 @@ $args{OSVERSION} = getargs("OS", "macosx-default-default"); $args{MEM} = getargs("MEM", "mrc,arc"); $args{LANGUAGE} = [ map { lc($_) } @{getargs("LANGUAGE", "c,objective-c,c++,objective-c++")} ]; +$args{BUILD_SHARED_CACHE} = getargs("BUILD_SHARED_CACHE", 0); + +$args{DYLD} = getargs("DYLD", "2,3"); + $args{CC} = getargs("CC", "clang"); -$HOST = getarg("HOST", "iphone"); -$PORT = getarg("PORT", "10022"); +$HOST = getarg("HOST", 0); +$PORT = getarg("PORT", ""); +if ($PORT) { + $PORT = "-p $PORT"; +} +$DEVICE = getarg("DEVICE", "booted"); + +$PARALLELBUILDS = getarg("PARALLELBUILDS", `sysctl -n hw.ncpu`); + +$SHAREDCACHEDIR = getarg("SHAREDCACHEDIR", ""); { my $guardmalloc = getargs("GUARDMALLOC", 0); @@ -1710,6 +1867,8 @@ for my $configref (@configs) { } } +make("find $DSTROOT$BUILDDIR -name build-succeeded -delete", "/"); + print "note: -----\n"; my $color = ($failconfigs ? $red : ""); colorprint $color, "note: $testconfigs configurations, " . 
diff --git a/test/timeout.pl b/test/timeout.pl old mode 100644 new mode 100755 diff --git a/test/unload.m b/test/unload.m index ccd99b7..0cf437c 100644 --- a/test/unload.m +++ b/test/unload.m @@ -138,6 +138,9 @@ void cycle(void) int main() { + char *useClosures = getenv("DYLD_USE_CLOSURES"); + int dyld3 = useClosures != NULL && useClosures[0] != '0'; + objc_setForwardHandler((void*)&forward_handler, (void*)&forward_handler); #if defined(__arm__) || defined(__arm64__) @@ -153,10 +156,11 @@ int main() #endif leak_mark(); - while (count--) { + for (int i = 0; i < count; i++) { cycle(); } - leak_check(0); + // dyld3 currently leaks 8 bytes for each dlopen/dlclose pair, so accommodate it. rdar://problem/53769254 + leak_check(dyld3 ? (count * sizeof(void *)) : 0); // 5359412 Make sure dylibs with nothing other than image_info can close void *dylib = dlopen("unload3.dylib", RTLD_LAZY); @@ -164,7 +168,9 @@ int main() int err = dlclose(dylib); testassert(err == 0); err = dlclose(dylib); - testassert(err == -1); // already closed + // dyld3 doesn't error when dlclosing the dylib twice. This is probably expected. rdar://problem/53769374 + if (!dyld3) + testassert(err == -1); // already closed // Make sure dylibs with real objc content cannot close dylib = dlopen("unload4.dylib", RTLD_LAZY); @@ -172,7 +178,9 @@ int main() err = dlclose(dylib); testassert(err == 0); err = dlclose(dylib); - testassert(err == -1); // already closed + // dyld3 doesn't error when dlclosing the dylib twice. This is probably expected. 
rdar://problem/53769374 + if (!dyld3) + testassert(err == -1); // already closed succeed(__FILE__); } diff --git a/test/weakReferenceHook.m b/test/weakReferenceHook.m new file mode 100644 index 0000000..890173d --- /dev/null +++ b/test/weakReferenceHook.m @@ -0,0 +1,49 @@ +/* + TEST_CONFIG MEM=mrc + TEST_ENV OBJC_DISABLE_NONPOINTER_ISA=YES +*/ + +#include "test.h" +#include "testroot.i" + +bool hasWeakRefs = false; + +@interface TestRoot (WeakHooks) +@end + +@implementation TestRoot (WeakHooks) + +- (void)_setWeaklyReferenced { + hasWeakRefs = true; +} + +// -_setWeaklyReferenced is currently limited to raw-isa custom-rr to avoid overhead +- (void) release { +} + +@end + +int main() { + id obj = [TestRoot new]; + id wobj = nil; + objc_storeWeak(&wobj, obj); + testassert(hasWeakRefs == true); + + id out = objc_loadWeak(&wobj); + testassert(out == obj); + + objc_storeWeak(&wobj, nil); + out = objc_loadWeak(&wobj); + testassert(out == nil); + + hasWeakRefs = false; + objc_storeWeak(&wobj, obj); + testassert(hasWeakRefs == true); + + + out = objc_loadWeak(&wobj); + testassert(out == obj); + objc_storeWeak(&wobj, nil); + + succeed(__FILE__); +} diff --git a/version.bat b/version.bat old mode 100644 new mode 100755